xref: /openbmc/linux/arch/s390/kernel/ftrace.c (revision 22fd411a)
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

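/*
 * MCOUNT_OFFSET_RET is the distance in bytes from the common return
 * point of the mcount/ftrace code blocks back to the first patched
 * instruction (offset 18 - 6 = 12 on 64 bit, offset 26 - 4 = 22 on
 * 31 bit, see the block comments below). prepare_ftrace_return()
 * subtracts it from the return address to recover the call site
 * address that ftrace has on record.
 */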
#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	lgr	%r0,%r0\n"
	"	basr	%r14,%r1\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

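/*
 * The enabled and disabled code blocks above differ only in their first
 * instruction (lg vs. jg, 6 bytes each), so patching that single
 * instruction is sufficient to switch between them.
 */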
#define FTRACE_INSN_SIZE	6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	1f\n"
	"	j	0f\n"
	"	.fill	12,1,0x07\n"
	"0:	basr	%r14,%r14\n"
	"1:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

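/*
 * As on 64 bit, the enabled and disabled code blocks differ only in
 * their first instruction (l vs. j, 4 bytes each), so patching that
 * single instruction switches between them.
 */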
#define FTRACE_INSN_SIZE	4

#endif /* CONFIG_64BIT */

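/*
 * Disable tracing: overwrite the patchable part of the mcount block
 * with the disabled code block, so that the tracer call is branched
 * over.
 */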
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

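/*
 * Enable tracing: rewriting only the first instruction of the block
 * (FTRACE_INSN_SIZE bytes) turns the branch back into a load of the
 * tracer address from the lowcore (__LC_FTRACE_FUNC).
 */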
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

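/*
 * Nothing to patch here: the enabled code block branches through
 * __LC_FTRACE_FUNC at run time, so there is no direct call instruction
 * that would have to be rewritten when the tracer function changes.
 */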
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

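/*
 * The ftrace core treats the value stored through @data as a return
 * code; writing zero signals that no further architecture specific
 * initialization is required.
 */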
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *) data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
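	/*
	 * Strip the 31-bit addressing mode bit and step back from the
	 * return point to the first patched instruction, which is the
	 * address ftrace has on record (cf. MCOUNT_OFFSET_RET above).
	 */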
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}


#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative and save (bras) to
 * prepare_ftrace_return. To disable the call to prepare_ftrace_return
 * we patch the bras offset to point directly behind the instruction.
 * To enable the call we calculate the original offset to
 * prepare_ftrace_return and put it back.
 */
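/*
 * Note: the bras branch offset is encoded in halfwords (hence the
 * division by two) and its 16-bit immediate field starts two bytes
 * into the instruction (hence "ftrace_graph_caller + 2").
 */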
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

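/*
 * An offset of 0x0002 halfwords (4 bytes) makes the bras branch to the
 * instruction immediately following it, so the call to
 * prepare_ftrace_return is skipped.
 */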
int ftrace_disable_ftrace_graph_caller(void)
{
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */