xref: /openbmc/linux/arch/powerpc/kernel/trace/ftrace.c (revision cbabf03c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
31 
32 #ifdef CONFIG_DYNAMIC_FTRACE
33 
34 /*
35  * We generally only have a single long_branch tramp and at most 2 or 3 plt
36  * tramps generated. But, we don't use the plt tramps currently. We also allot
37  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
38  * tramps in total. Set aside 8 just to be sure.
39  */
40 #define	NUM_FTRACE_TRAMPS	8
41 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
42 
43 static ppc_inst_t
44 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
45 {
46 	ppc_inst_t op;
47 
48 	addr = ppc_function_entry((void *)addr);
49 
50 	/* if (link) set op to 'bl' else 'b' */
51 	create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
52 
53 	return op;
54 }
55 
56 static int
57 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
58 {
59 	ppc_inst_t replaced;
60 
61 	/*
62 	 * Note:
63 	 * We are paranoid about modifying text, as if a bug was to happen, it
64 	 * could cause us to read or write to someplace that could cause harm.
65 	 * Carefully read and modify the code with probe_kernel_*(), and make
66 	 * sure what we read is what we expected it to be before modifying it.
67 	 */
68 
69 	/* read the text we want to modify */
70 	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
71 		return -EFAULT;
72 
73 	/* Make sure it is what we expect it to be */
74 	if (!ppc_inst_equal(replaced, old)) {
75 		pr_err("%p: replaced (%s) != old (%s)",
76 		(void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
77 		return -EINVAL;
78 	}
79 
80 	/* replace the text with the new text */
81 	if (patch_instruction((u32 *)ip, new))
82 		return -EPERM;
83 
84 	return 0;
85 }
86 
87 /*
88  * Helper functions that are the same for both PPC64 and PPC32.
89  */
90 static int test_24bit_addr(unsigned long ip, unsigned long addr)
91 {
92 	ppc_inst_t op;
93 	addr = ppc_function_entry((void *)addr);
94 
95 	/* use the create_branch to verify that this offset can be branched */
96 	return create_branch(&op, (u32 *)ip, addr, 0) == 0;
97 }
98 
99 static int is_bl_op(ppc_inst_t op)
100 {
101 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
102 }
103 
104 static int is_b_op(ppc_inst_t op)
105 {
106 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
107 }
108 
109 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
110 {
111 	int offset;
112 
113 	offset = (ppc_inst_val(op) & 0x03fffffc);
114 	/* make it signed */
115 	if (offset & 0x02000000)
116 		offset |= 0xfe000000;
117 
118 	return ip + (long)offset;
119 }
120 
121 #ifdef CONFIG_MODULES
122 #ifdef CONFIG_PPC64
/*
 * Turn the ftrace call site at rec->ip back into a no-op (ppc64).
 *
 * The site currently holds a 'bl' to a module trampoline; verify that the
 * trampoline resolves to 'addr' before patching. Depending on the ABI the
 * "nop" is either a real nop (-mprofile-kernel) or a 'b +8' that skips the
 * TOC-restore load following the call.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-EPERM on failure.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* resolve the module trampoline to its final destination */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = ppc_inst(PPC_RAW_NOP());

	/* sanity check the instruction preceding the call site */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
	    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
		pr_err("Unexpected instruction %s around bl _mcount\n",
		       ppc_inst_as_str(op));
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = ppc_inst(PPC_INST_BRANCH | 8);	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
		pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	/* all checks passed: patch the call site */
	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
217 
218 #else /* !PPC64 */
219 static int
220 __ftrace_make_nop(struct module *mod,
221 		  struct dyn_ftrace *rec, unsigned long addr)
222 {
223 	ppc_inst_t op;
224 	unsigned long ip = rec->ip;
225 	unsigned long tramp, ptr;
226 
227 	if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
228 		return -EFAULT;
229 
230 	/* Make sure that that this is still a 24bit jump */
231 	if (!is_bl_op(op)) {
232 		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
233 		return -EINVAL;
234 	}
235 
236 	/* lets find where the pointer goes */
237 	tramp = find_bl_target(ip, op);
238 
239 	/* Find where the trampoline jumps to */
240 	if (module_trampoline_target(mod, tramp, &ptr)) {
241 		pr_err("Failed to get trampoline target\n");
242 		return -EFAULT;
243 	}
244 
245 	if (ptr != addr) {
246 		pr_err("Trampoline location %08lx does not match addr\n",
247 		       tramp);
248 		return -EINVAL;
249 	}
250 
251 	op = ppc_inst(PPC_RAW_NOP());
252 
253 	if (patch_instruction((u32 *)ip, op))
254 		return -EPERM;
255 
256 	return 0;
257 }
258 #endif /* PPC64 */
259 #endif /* CONFIG_MODULES */
260 
261 static unsigned long find_ftrace_tramp(unsigned long ip)
262 {
263 	int i;
264 	ppc_inst_t instr;
265 
266 	/*
267 	 * We have the compiler generated long_branch tramps at the end
268 	 * and we prefer those
269 	 */
270 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
271 		if (!ftrace_tramps[i])
272 			continue;
273 		else if (create_branch(&instr, (void *)ip,
274 				       ftrace_tramps[i], 0) == 0)
275 			return ftrace_tramps[i];
276 
277 	return 0;
278 }
279 
280 static int add_ftrace_tramp(unsigned long tramp)
281 {
282 	int i;
283 
284 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
285 		if (!ftrace_tramps[i]) {
286 			ftrace_tramps[i] = tramp;
287 			return 0;
288 		}
289 
290 	return -1;
291 }
292 
/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 *
 * Returns 0 if the trampoline is (now) usable for ftrace, -1 otherwise
 * (unknown plt tramp, not a long branch, target out of range, or no
 * bookkeeping slots left).
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;
	ppc_inst_t instr;
	/* tramps we have already rejected, to avoid re-probing them */
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* only _mcount long-branch tramps may be repurposed */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	/* dry-run the branch first so we fail before touching text */
	if (create_branch(&instr, (void *)tramp, ptr, 0)) {
		pr_debug("%ps is not reachable from existing mcount tramp\n",
				(void *)ptr);
		return -1;
	}

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
365 
/*
 * Nop out an ftrace call site located in core kernel text.
 *
 * The site branches to a compiler-generated long_branch trampoline; try to
 * convert that trampoline for ftrace use (or verify another recorded tramp
 * is reachable) before patching the site itself, so the site can later be
 * re-enabled. 'addr' is unused here; the target is derived from the site.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
404 
/*
 * ftrace core callback: disable the call site at rec->ip that currently
 * calls 'addr'. Dispatches to the in-range, core-kernel, or module path.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		/* cache the module for later calls on this record */
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
452 
453 #ifdef CONFIG_MODULES
454 #ifdef CONFIG_PPC64
455 /*
456  * Examine the existing instructions for __ftrace_make_call.
457  * They should effectively be a NOP, and follow formal constraints,
458  * depending on the ABI. Return false if they don't.
459  */
460 #ifndef CONFIG_MPROFILE_KERNEL
461 static int
462 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
463 {
464 	/*
465 	 * We expect to see:
466 	 *
467 	 * b +8
468 	 * ld r2,XX(r1)
469 	 *
470 	 * The load offset is different depending on the ABI. For simplicity
471 	 * just mask it out when doing the compare.
472 	 */
473 	if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
474 	    (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
475 		return 0;
476 	return 1;
477 }
478 #else
479 static int
480 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
481 {
482 	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
483 	if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
484 		return 0;
485 	return 1;
486 }
487 #endif
488 
/*
 * Enable the ftrace call site at rec->ip inside a module (ppc64): verify
 * the current two-instruction "nop" sequence, pick the module's plain or
 * regs-saving trampoline, check it resolves to 'addr', and patch in a
 * 'bl' to it.
 *
 * Returns 0 on success, -EFAULT/-EINVAL on failure.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	ppc_inst_t instr;
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* choose the regs-saving trampoline when the record requests it */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
555 
556 #else  /* !CONFIG_PPC64: */
/*
 * Enable the ftrace call site at rec->ip inside a module (ppc32): verify
 * the site holds a nop, then patch in a 'bl' to the module's ftrace
 * trampoline.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-EPERM on failure.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err;
	ppc_inst_t op;
	u32 *ip = (u32 *)rec->ip;
	struct module *mod = rec->arch.mod;
	unsigned long tramp;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, ip))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (!ppc_inst_equal(op,  ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* choose the regs-saving trampoline when the record requests it */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;
	/* create the branch to the trampoline */
	err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
	if (err) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction(ip, op))
		return -EPERM;

	return 0;
}
606 #endif /* CONFIG_PPC64 */
607 #endif /* CONFIG_MODULES */
608 
/*
 * Enable an ftrace call site located in core kernel text: check that
 * 'addr' is one of the known ftrace entry points, that the site holds a
 * nop, and patch a 'bl' to a reachable recorded trampoline.
 *
 * Returns 0 on success, -EFAULT/-EINVAL on failure.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* find any recorded trampoline reachable from this site */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
655 
/*
 * ftrace core callback: enable the call site at rec->ip so it calls
 * 'addr'. Dispatches to the in-range, core-kernel, or module path.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
691 
692 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
693 #ifdef CONFIG_MODULES
/*
 * Retarget an active ftrace call site in a module from 'old_addr' to
 * 'addr' (used when switching between the plain and regs-saving ftrace
 * callers). Verifies the current branch really ends up at 'old_addr',
 * then patches either a direct branch or a branch via the module's
 * appropriate trampoline.
 *
 * Returns 0 on success, -EFAULT/-EINVAL on failure.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* out of range: go via the appropriate module trampoline */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
786 #endif
787 
/*
 * ftrace core callback: retarget the call site at rec->ip from
 * 'old_addr' to 'addr'. Dispatches to the in-range, core-kernel, or
 * module path.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
827 #endif
828 
/*
 * ftrace core callback: point the global ftrace_call site (and, when
 * DYNAMIC_FTRACE_WITH_REGS, the ftrace_regs_call site) at 'func'.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	/* swap the current branch at ftrace_call for one to 'func' */
	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}
851 
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine(): this arch patches call sites directly.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
860 
861 #ifdef CONFIG_PPC64
862 #define PACATOC offsetof(struct paca_struct, kernel_toc)
863 
864 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
865 
/*
 * Boot-time setup of the two kernel ftrace trampolines (ftrace_tramp_text
 * and ftrace_tramp_init): fill each with a stub that loads the kernel TOC
 * from the paca, computes the address of ftrace_[regs_]caller via
 * addis/addi, and branches there through ctr. The tramps are then
 * recorded in ftrace_tramps[] for find_ftrace_tramp().
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	/* the addis/addi pair only covers a signed 32-bit offset from the TOC */
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* patch the high/low halves of reladdr into addis/addi */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
899 #else
/* No boot-time trampoline setup is needed on 32-bit. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
904 #endif
905 #endif /* CONFIG_DYNAMIC_FTRACE */
906 
907 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
908 
909 extern void ftrace_graph_call(void);
910 extern void ftrace_graph_stub(void);
911 
912 static int ftrace_modify_ftrace_graph_caller(bool enable)
913 {
914 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
915 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
916 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
917 	ppc_inst_t old, new;
918 
919 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
920 		return 0;
921 
922 	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
923 	new = ftrace_call_replace(ip, enable ? addr : stub, 0);
924 
925 	return ftrace_modify_code(ip, old, new);
926 }
927 
/* Enable the branch to ftrace_graph_caller at the graph call site. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
932 
/* Restore the branch to ftrace_graph_stub at the graph call site. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
937 
938 /*
939  * Hook the return address and push it in the stack of return addrs
940  * in current thread info. Return the address we want to divert to.
941  */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
						unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	/* graph tracer has shut itself down; leave the return untouched */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	/* graph tracing is paused for the current task */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* guard against recursive entry into the tracer */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* on success, divert the function's return to return_to_handler */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
967 
968 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * ftrace_ops entry point for the graph tracer with
 * DYNAMIC_FTRACE_WITH_ARGS: divert the traced function's return (LR in
 * fregs) via prepare_ftrace_return(), passing r1 as the stack pointer.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	fregs->regs.link = prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
974 #endif
975 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
976 
977 #ifdef PPC64_ELF_ABI_v1
/*
 * ELFv1 dot-symbol fixup for ftrace name matching: if the candidate
 * symbol starts with '.' but the search pattern does not, match against
 * the name with the leading dot stripped.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	bool strip_dot = (str[0] == '.') && (search[0] != '.');

	return strip_dot ? str + 1 : str;
}
985 #endif /* PPC64_ELF_ABI_v1 */
986