xref: /openbmc/linux/arch/powerpc/kernel/trace/ftrace.c (revision 465191d6)
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>

/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define	NUM_FTRACE_TRAMPS	8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

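/*
 * Build the branch instruction a traced call site is expected to contain:
 * a relative 'bl' to addr when link is set, a plain 'b' otherwise. addr is
 * run through ppc_function_entry() first so that we branch to the target's
 * actual entry point. Callers use the result either as the expected old
 * instruction (ftrace_make_nop()) or as the new one to patch in
 * (ftrace_make_call(), ftrace_update_ftrace_func()).
 */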
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}

static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	ppc_inst_t replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_inst_from_kernel_nofault()
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */

	/* read the text we want to modify */
	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (!ppc_inst_equal(replaced, old)) {
		pr_err("%p: replaced (%s) != old (%s)",
		(void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
		return -EINVAL;
	}

	/* replace the text with the new text */
	return patch_instruction((u32 *)ip, new);
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(addr - ip);
}

static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}

static int is_b_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}

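/*
 * Decode the target of a 'b'/'bl' at ip: mask out the LI offset field and
 * sign-extend it. As an illustrative example, a "bl" encoded as 0x4bfffff5
 * has LI bits 0x03fffff4; bit 25 is set, so the offset sign-extends to
 * 0xfffffff4 (-12) and the branch target is ip - 12.
 */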
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
	int offset;

	offset = PPC_LI(ppc_inst_val(op));
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_MODULES
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit branch */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %s around bl _mcount\n",
			       ppc_inst_as_str(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
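	/*
	 * Illustration: on ELFv1 the call site goes from
	 *
	 *	bl <tramp>
	 *	ld r2,XX(r1)
	 * to
	 *	b +8
	 *	ld r2,XX(r1)
	 *
	 * so the untraced path skips over the TOC reload, while the load
	 * itself stays intact for any task still returning through the
	 * trampoline.
	 */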
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
#else
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	/*
	 * We have the compiler generated long_branch tramps at the end
	 * and we prefer those
	 */
	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
		if (!ftrace_tramps[i])
			continue;
		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}

static int add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return 0;
		}

	return -1;
}

/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 */
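/*
 * Concretely, a trampoline that was
 *	b	_mcount
 * is patched in place into
 *	b	ftrace_caller		(or ftrace_regs_caller)
 * and recorded in ftrace_tramps[] so that call sites within branch range
 * can keep using it.
 */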
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* let's find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}

static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit branch */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}

#ifdef CONFIG_MODULES
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
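/*
 * Concretely: an ELFv1 call site that has been nop'ed out reads
 *	b +8
 *	ld r2,XX(r1)
 * (as patched by __ftrace_make_nop() above); everywhere else a single nop
 * is expected.
 */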
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
	else
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
}

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) &&
	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
	if (!mod->arch.tramp ||
	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit branch */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
#endif

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
#endif

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)

extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];

void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			return;
		}
}

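/*
 * Fill in the two ftrace trampolines placed after .text and .init.text
 * (ftrace_tramp_text / ftrace_tramp_init). Each stub loads the kernel TOC
 * from the paca and branches to ftrace_[regs_]caller via CTR:
 *
 *	ld	r12,PACATOC(r13)
 *	addis	r12,r12,reladdr@ha
 *	addi	r12,r12,reladdr@l
 *	mtctr	r12
 *	bctr
 *
 * where reladdr, the offset of the caller from the kernel TOC, is patched
 * into the addis/addi below.
 */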
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	reladdr = addr - kernel_toc_addr();

	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

static int ftrace_modify_ftrace_graph_caller(bool enable)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return 0;

	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
	new = ftrace_call_replace(ip, enable ? addr : stub, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
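/*
 * If function_graph_enter() accepts this call, we return the address of
 * return_to_handler() instead of the real parent; the caller (e.g.
 * ftrace_graph_func() below) writes it back over the saved LR so that the
 * traced function "returns" into the graph tracer.
 */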
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
#else
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_PPC64_ELF_ABI_V1
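/*
 * ELFv1 text symbols carry a leading dot ("._mcount" vs "_mcount"); strip
 * it so that filters written against the plain C name still match.
 */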
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* CONFIG_PPC64_ELF_ABI_V1 */
845