xref: /openbmc/linux/arch/powerpc/kernel/trace/ftrace.c (revision 724ba675)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
31 /*
32  * We generally only have a single long_branch tramp and at most 2 or 3 plt
33  * tramps generated. But, we don't use the plt tramps currently. We also allot
34  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
35  * tramps in total. Set aside 8 just to be sure.
36  */
37 #define	NUM_FTRACE_TRAMPS	8
38 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
39 
40 static ppc_inst_t
41 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
42 {
43 	ppc_inst_t op;
44 
45 	addr = ppc_function_entry((void *)addr);
46 
47 	/* if (link) set op to 'bl' else 'b' */
48 	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
49 
50 	return op;
51 }
52 
53 static inline int
54 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
55 {
56 	ppc_inst_t replaced;
57 
58 	/*
59 	 * Note:
60 	 * We are paranoid about modifying text, as if a bug was to happen, it
61 	 * could cause us to read or write to someplace that could cause harm.
62 	 * Carefully read and modify the code with probe_kernel_*(), and make
63 	 * sure what we read is what we expected it to be before modifying it.
64 	 */
65 
66 	/* read the text we want to modify */
67 	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
68 		return -EFAULT;
69 
70 	/* Make sure it is what we expect it to be */
71 	if (!ppc_inst_equal(replaced, old)) {
72 		pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
73 		       ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
74 		return -EINVAL;
75 	}
76 
77 	/* replace the text with the new text */
78 	return patch_instruction((u32 *)ip, new);
79 }
80 
81 /*
82  * Helper functions that are the same for both PPC64 and PPC32.
83  */
/* Can a 24-bit relative branch at 'ip' reach the entry point of 'addr'? */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	unsigned long entry = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(entry - ip);
}
90 
91 static int is_bl_op(ppc_inst_t op)
92 {
93 	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
94 }
95 
96 static int is_b_op(ppc_inst_t op)
97 {
98 	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
99 }
100 
101 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
102 {
103 	int offset;
104 
105 	offset = PPC_LI(ppc_inst_val(op));
106 	/* make it signed */
107 	if (offset & 0x02000000)
108 		offset |= 0xfe000000;
109 
110 	return ip + (long)offset;
111 }
112 
113 #ifdef CONFIG_MODULES
/*
 * Turn the 'bl <tramp>' at a module call site back into a NOP (or, on
 * ELF ABI v1, a 'b +8' that skips the TOC-restore load which follows).
 *
 * The existing code is validated end to end first: the site must hold
 * a 'bl' to a module trampoline whose resolved target equals 'addr',
 * and the neighbouring instruction must match the expected ABI
 * sequence.  Returns 0 on success, -EFAULT/-EINVAL/-EPERM on failure.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Resolve the trampoline to the address it ultimately branches to */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Sanity-check the instruction preceding the 'bl _mcount' */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 * XXX: could make PCREL depend on MPROFILE_KERNEL
	 * XXX: check PCREL && MPROFILE_KERNEL calling sequence
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
212 #else
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/* !CONFIG_MODULES: no module call sites exist, nothing to patch */
	return 0;
}
217 #endif /* CONFIG_MODULES */
218 
219 static unsigned long find_ftrace_tramp(unsigned long ip)
220 {
221 	int i;
222 
223 	/*
224 	 * We have the compiler generated long_branch tramps at the end
225 	 * and we prefer those
226 	 */
227 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
228 		if (!ftrace_tramps[i])
229 			continue;
230 		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
231 			return ftrace_tramps[i];
232 
233 	return 0;
234 }
235 
236 static int add_ftrace_tramp(unsigned long tramp)
237 {
238 	int i;
239 
240 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
241 		if (!ftrace_tramps[i]) {
242 			ftrace_tramps[i] = tramp;
243 			return 0;
244 		}
245 
246 	return -1;
247 }
248 
249 /*
250  * If this is a compiler generated long_branch trampoline (essentially, a
251  * trampoline that has a branch to _mcount()), we re-write the branch to
252  * instead go to ftrace_[regs_]caller() and note down the location of this
253  * trampoline.
254  */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* Only a branch to _mcount qualifies as a compiler-generated tramp */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	/* Remember this tramp so later call sites can reuse it */
	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
304 
/*
 * NOP out a kernel (non-module) ftrace call site.  The site must still
 * hold a 'bl' to a trampoline; the trampoline is registered via
 * setup_mcount_compiler_tramp() (or another reachable tramp must
 * already be known) before the call site itself is patched to a NOP.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
343 
/*
 * Convert the ftrace call site in 'rec' to a NOP.
 *
 * Sites within 24-bit branch range of 'addr' are patched directly;
 * out-of-range kernel text goes via __ftrace_make_nop_kernel(), and
 * module text via __ftrace_make_nop().  Also records (or validates)
 * the owning module in rec->arch.mod.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}
389 
390 #ifdef CONFIG_MODULES
391 /*
392  * Examine the existing instructions for __ftrace_make_call.
393  * They should effectively be a NOP, and follow formal constraints,
394  * depending on the ABI. Return false if they don't.
395  */
396 static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
397 {
398 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
399 		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
400 	else
401 		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
402 		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
403 }
404 
405 static int
406 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
407 {
408 	ppc_inst_t op[2];
409 	void *ip = (void *)rec->ip;
410 	unsigned long entry, ptr, tramp;
411 	struct module *mod = rec->arch.mod;
412 
413 	/* read where this goes */
414 	if (copy_inst_from_kernel_nofault(op, ip))
415 		return -EFAULT;
416 
417 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
418 	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
419 		return -EFAULT;
420 
421 	if (!expected_nop_sequence(ip, op[0], op[1])) {
422 		pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
423 		       ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
424 		return -EINVAL;
425 	}
426 
427 	/* If we never set up ftrace trampoline(s), then bail */
428 	if (!mod->arch.tramp ||
429 	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
430 		pr_err("No ftrace trampoline\n");
431 		return -EINVAL;
432 	}
433 
434 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
435 		tramp = mod->arch.tramp_regs;
436 	else
437 		tramp = mod->arch.tramp;
438 
439 	if (module_trampoline_target(mod, tramp, &ptr)) {
440 		pr_err("Failed to get trampoline target\n");
441 		return -EFAULT;
442 	}
443 
444 	pr_devel("trampoline target %lx", ptr);
445 
446 	entry = ppc_global_function_entry((void *)addr);
447 	/* This should match what was called */
448 	if (ptr != entry) {
449 		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
450 		return -EINVAL;
451 	}
452 
453 	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
454 		pr_err("REL24 out of range!\n");
455 		return -EINVAL;
456 	}
457 
458 	return 0;
459 }
460 #else
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	/* !CONFIG_MODULES: no module call sites exist, nothing to patch */
	return 0;
}
465 #endif /* CONFIG_MODULES */
466 
/*
 * Patch a NOP'ed kernel (non-module) call site into a 'bl' to a
 * reachable ftrace trampoline.  'addr' must resolve to ftrace_caller
 * (or ftrace_regs_caller when DYNAMIC_FTRACE_WITH_REGS), and the site
 * must currently hold a NOP.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Find a trampoline within 24-bit branch range of this site */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
510 
/*
 * Convert the NOP at the ftrace call site in 'rec' into a call to
 * 'addr'.  In-range sites are patched directly; out-of-range kernel
 * text goes via __ftrace_make_call_kernel(), and module text via
 * __ftrace_make_call().
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}
545 
546 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
547 #ifdef CONFIG_MODULES
/*
 * Retarget an existing ftrace call in a module from 'old_addr' to
 * 'addr'.  First verifies that the current 'bl' (directly, or through
 * a module trampoline) really targets 'old_addr', then patches in
 * either a direct branch (when 'addr' is in range) or a branch to the
 * appropriate module trampoline.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: go via the module's (regs) trampoline instead */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
634 #else
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/* !CONFIG_MODULES: no module call sites exist, nothing to patch */
	return 0;
}
639 #endif
640 
/*
 * Redirect the call at 'rec' from 'old_addr' to 'addr'.  When both
 * addresses are within branch range the site is patched directly.
 * Out-of-range kernel text needs no work (see comment below), and
 * module text goes via __ftrace_modify_call().
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
678 #endif
679 
680 int ftrace_update_ftrace_func(ftrace_func_t func)
681 {
682 	unsigned long ip = (unsigned long)(&ftrace_call);
683 	ppc_inst_t old, new;
684 	int ret;
685 
686 	old = ppc_inst_read((u32 *)&ftrace_call);
687 	new = ftrace_call_replace(ip, (unsigned long)func, 1);
688 	ret = ftrace_modify_code(ip, old, new);
689 
690 	/* Also update the regs callback function */
691 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
692 		ip = (unsigned long)(&ftrace_regs_call);
693 		old = ppc_inst_read((u32 *)&ftrace_regs_call);
694 		new = ftrace_call_replace(ip, (unsigned long)func, 1);
695 		ret = ftrace_modify_code(ip, old, new);
696 	}
697 
698 	return ret;
699 }
700 
701 /*
702  * Use the default ftrace_modify_all_code, but without
703  * stop_machine().
704  */
void arch_ftrace_update_code(int command)
{
	/* run the generic update directly -- no stop_machine() on powerpc */
	ftrace_modify_all_code(command);
}
709 
710 #ifdef CONFIG_PPC64
711 #define PACATOC offsetof(struct paca_struct, kernel_toc)
712 
713 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
714 
715 void ftrace_free_init_tramp(void)
716 {
717 	int i;
718 
719 	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
720 		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
721 			ftrace_tramps[i] = 0;
722 			return;
723 		}
724 }
725 
/*
 * Initialize the two ftrace trampolines placed after .text and
 * .init.text (ftrace_tramp_text / ftrace_tramp_init).  Each stub loads
 * the address of ftrace_[regs_]caller into r12 (via a prefixed 'pla'
 * when PPC_KERNEL_PCREL, otherwise TOC-relative ld/addis/addi) and
 * branches through CTR.  The displacement fields of the template
 * instructions are fixed up here, then the tramps are registered.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
#ifdef CONFIG_PPC_KERNEL_PCREL
	u32 stub_insns[] = {
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
#else
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
#endif

	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
					(void *)addr);
				return -1;
			}

			/* fix up the 'pla' pair with the pcrel displacement */
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		reladdr = addr - kernel_toc_addr();

		if (reladdr >= (long)SZ_2G || reladdr < -(long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			/* fix up addis/addi with the TOC-relative high/low halves */
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}
790 #endif
791 
792 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
793 
794 extern void ftrace_graph_call(void);
795 extern void ftrace_graph_stub(void);
796 
797 static int ftrace_modify_ftrace_graph_caller(bool enable)
798 {
799 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
800 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
801 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
802 	ppc_inst_t old, new;
803 
804 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
805 		return 0;
806 
807 	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
808 	new = ftrace_call_replace(ip, enable ? addr : stub, 0);
809 
810 	return ftrace_modify_code(ip, old, new);
811 }
812 
/* Patch the graph call site to branch to ftrace_graph_caller */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
817 
/* Patch the graph call site back to the ftrace_graph_stub */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
822 
823 /*
824  * Hook the return address and push it in the stack of return addrs
825  * in current thread info. Return the address we want to divert to.
826  */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	/* Bail out without hooking when graph tracing is dead or paused */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Guard against recursing into ourselves via the tracer */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* On success, divert the return path to return_to_handler */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
852 
853 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/* DYNAMIC_FTRACE_WITH_ARGS entry: redirect the saved LR in 'fregs' */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
859 #else
/* Non-ARGS entry point: thin wrapper around __prepare_ftrace_return() */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
865 #endif
866 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
867 
868 #ifdef CONFIG_PPC64_ELF_ABI_V1
/*
 * ELF ABI v1 symbol adjustment: when the symbol name begins with a dot
 * but the search string does not, skip the leading dot so the two can
 * be compared.  Otherwise return the name unchanged.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	bool skip_dot = (str[0] == '.' && search[0] != '.');

	return skip_dot ? str + 1 : str;
}
876 #endif /* CONFIG_PPC64_ELF_ABI_V1 */
877