xref: /openbmc/linux/arch/s390/kernel/ftrace.c (revision 6ee73861)
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

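/*
 * Code templates used to patch each mcount call site (layout inferred
 * from the blocks below; the compiler-generated side of the call site
 * lives outside this file): ftrace_call_code is the first instruction of
 * the block gcc -pg emits (it saves %r14 on the stack), ftrace_nop_code
 * is a relative jump over the whole MCOUNT_INSN_SIZE byte block, and
 * ftrace_disable_code is the initial replacement that branches over the
 * block while keeping its layout intact. Only the first FTRACE_INSN_SIZE
 * bytes are rewritten when tracing is toggled; on 64 bit the
 * ".word 0x0024" preserves the trailing two bytes of the six byte stg so
 * that re-enabling only has to touch the first four bytes.
 */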
void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4

#ifdef CONFIG_64BIT

asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	.word	0x0024\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	lg	%r14,8(15)\n"
	"	lgr	%r0,%r0\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	stg	%r14,8(%r15)\n");

#else /* CONFIG_64BIT */

asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

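/*
 * Replace old_size bytes at ip with new_code, but only after verifying
 * that the bytes currently there still match old_code. Returns -EFAULT
 * if the location cannot be read, -EINVAL if the expected old code is
 * not found and -EPERM if the write fails.
 */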
static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 * This, however, is just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}

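/*
 * Called via ftrace_make_nop() with addr == MCOUNT_ADDR when a call site
 * is converted for the first time: the original compiler-generated mcount
 * block, recognized by its leading ftrace_call_code instruction, is
 * replaced by the full ftrace_disable_code sequence.
 */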
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

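/*
 * Turn a call site off: rewrite the first instruction of an enabled site
 * (ftrace_call_code) into the jump of ftrace_nop_code, or hand the very
 * first conversion of a site over to ftrace_make_initial_nop().
 */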
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

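/*
 * Turn a call site back on: replace the jump of ftrace_nop_code with
 * ftrace_call_code so the block falls through to the branch to the
 * current tracer.
 */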
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}

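/*
 * Tell the architecture code which tracer to call: ftrace_dyn_func is
 * read by the low-level ftrace entry code outside this file, so
 * switching tracers is just a pointer update here.
 */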
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}

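/*
 * Nothing architecture specific to set up; the return code is passed
 * back to the ftrace core via *data.
 */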
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * The instruction there is a branch relative on condition. The condition
 * mask is either all ones (always branch, i.e. disable ftrace_graph_caller)
 * or all zeroes (nop, i.e. enable ftrace_graph_caller).
 * Instruction format for brc is a7m4xxxx where m is the condition mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
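	/* condition mask 0: branch never, fall through into ftrace_graph_caller */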
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
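	/* condition mask 0xf: branch always, skip ftrace_graph_caller */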
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

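/*
 * Map the address the graph tracer was entered from back to the start of
 * the mcount block at the call site: the offset of ftrace_disable_return
 * within ftrace_disable_code is the distance between the block start and
 * the return address of the call to the tracer.
 */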
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it on the stack of return addresses
 * of the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

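/* Return the metadata for syscall number nr, or NULL if it is unknown. */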
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

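/*
 * Linear search of the metadata table for a syscall with the given name;
 * returns its number or -1 if it is not found.
 */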
int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;
	for (i = 0; i < NR_syscalls; i++)
		if (syscalls_metadata[i])
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
	return -1;
}

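/* Store the trace event ids assigned to the enter/exit events of a syscall. */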
void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

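/*
 * Find the syscall metadata entry whose name matches the symbol at the
 * given syscall table address. The first three characters are skipped on
 * both sides so that differing prefixes (e.g. "sys_" vs. "SyS_") do not
 * prevent a match.
 */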
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

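/*
 * Build the NR_syscalls sized lookup table that maps syscall numbers to
 * their metadata entries by resolving every sys_call_table slot.
 */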
static int __init arch_init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
				    GFP_KERNEL);
	if (!syscalls_metadata)
		return -ENOMEM;
	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
		syscalls_metadata[i] = meta;
	}
	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif