xref: /openbmc/linux/arch/sparc/kernel/ftrace.c (revision e3b9f1e8)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/spinlock.h>
3 #include <linux/hardirq.h>
4 #include <linux/ftrace.h>
5 #include <linux/percpu.h>
6 #include <linux/init.h>
7 #include <linux/list.h>
8 #include <trace/syscall.h>
9 
10 #include <asm/ftrace.h>
11 
12 #ifdef CONFIG_DYNAMIC_FTRACE
13 static const u32 ftrace_nop = 0x01000000;
14 
15 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
16 {
17 	u32 call;
18 	s32 off;
19 
20 	off = ((s32)addr - (s32)ip);
21 	call = 0x40000000 | ((u32)off >> 2);
22 
23 	return call;
24 }
25 
/*
 * Atomically patch the instruction word at @ip: replace @old with @new
 * via compare-and-swap, then flush the I-cache line so the new
 * instruction is visible to instruction fetch.
 *
 * Returns 0 on success, 1 if the CAS access faulted (fixup path taken),
 * or 2 if the word at @ip matched neither @old nor @new — i.e. the code
 * there was not what the caller expected.
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;
	int faulted;

	/*
	 * cas leaves the previous memory contents in the destination
	 * register (%[new] aliases "replaced" via the "0" constraint).
	 * On a fault, the __ex_table entry redirects 1b to 3b, which
	 * sets faulted = 1 and jumps back to 2b.
	 */
	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section .fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section __ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/*
	 * replaced == old: CAS succeeded.  replaced == new: someone
	 * already installed the target instruction.  Anything else is
	 * unexpected code at the patch site.
	 */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
55 
56 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
57 {
58 	unsigned long ip = rec->ip;
59 	u32 old, new;
60 
61 	old = ftrace_call_replace(ip, addr);
62 	new = ftrace_nop;
63 	return ftrace_modify_code(ip, old, new);
64 }
65 
66 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
67 {
68 	unsigned long ip = rec->ip;
69 	u32 old, new;
70 
71 	old = ftrace_nop;
72 	new = ftrace_call_replace(ip, addr);
73 	return ftrace_modify_code(ip, old, new);
74 }
75 
76 int ftrace_update_ftrace_func(ftrace_func_t func)
77 {
78 	unsigned long ip = (unsigned long)(&ftrace_call);
79 	u32 old, new;
80 
81 	old = *(u32 *) &ftrace_call;
82 	new = ftrace_call_replace(ip, (unsigned long)func);
83 	return ftrace_modify_code(ip, old, new);
84 }
85 
/*
 * Arch hook run once at boot when dynamic ftrace initializes.
 * Nothing to set up on sparc; always succeeds.
 */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
90 #endif
91 
92 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
93 
94 #ifdef CONFIG_DYNAMIC_FTRACE
95 extern void ftrace_graph_call(void);
96 
97 int ftrace_enable_ftrace_graph_caller(void)
98 {
99 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
100 	u32 old, new;
101 
102 	old = *(u32 *) &ftrace_graph_call;
103 	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
104 	return ftrace_modify_code(ip, old, new);
105 }
106 
107 int ftrace_disable_ftrace_graph_caller(void)
108 {
109 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
110 	u32 old, new;
111 
112 	old = *(u32 *) &ftrace_graph_call;
113 	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
114 
115 	return ftrace_modify_code(ip, old, new);
116 }
117 
#endif /* CONFIG_DYNAMIC_FTRACE */
119 
120 /*
121  * Hook the return address and push it in the stack of return addrs
122  * in current thread info.
123  */
124 unsigned long prepare_ftrace_return(unsigned long parent,
125 				    unsigned long self_addr,
126 				    unsigned long frame_pointer)
127 {
128 	unsigned long return_hooker = (unsigned long) &return_to_handler;
129 	struct ftrace_graph_ent trace;
130 
131 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
132 		return parent + 8UL;
133 
134 	trace.func = self_addr;
135 	trace.depth = current->curr_ret_stack + 1;
136 
137 	/* Only trace if the calling function expects to */
138 	if (!ftrace_graph_entry(&trace))
139 		return parent + 8UL;
140 
141 	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
142 				     frame_pointer, NULL) == -EBUSY)
143 		return parent + 8UL;
144 
145 	return return_hooker;
146 }
147 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
148