xref: /openbmc/linux/arch/s390/kernel/ftrace.c (revision 3932b9ca)
1 /*
2  * Dynamic function tracer architecture backend.
3  *
4  * Copyright IBM Corp. 2009
5  *
6  *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
8  */
9 
10 #include <linux/hardirq.h>
11 #include <linux/uaccess.h>
12 #include <linux/ftrace.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/kprobes.h>
16 #include <trace/syscall.h>
17 #include <asm/asm-offsets.h>
18 #include "entry.h"
19 
20 #ifdef CONFIG_DYNAMIC_FTRACE
21 
/*
 * Not real C functions: these labels mark the machine-code templates
 * emitted by the asm() blocks below. They are only ever used as source
 * buffers for probe_kernel_write() in ftrace_make_nop/ftrace_make_call.
 */
void ftrace_disable_code(void);
void ftrace_enable_insn(void);
24 
25 #ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disable ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
/*
 * Machine-code templates copied over the mcount call site:
 * ftrace_disable_code is the full "disabled" sequence (jg/lgr/basr),
 * ftrace_enable_insn is only the first patched instruction (the lg
 * from the lowcore __LC_FTRACE_FUNC field).
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	lgr	%r0,%r0\n"
	"	basr	%r14,%r1\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

/* Size of the single instruction written by ftrace_make_call (the lg). */
#define FTRACE_INSN_SIZE	6
62 
63 #else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
/*
 * Machine-code templates copied over the mcount call site:
 * ftrace_disable_code is the full "disabled" sequence, ftrace_enable_insn
 * is only the first patched instruction (the l from __LC_FTRACE_FUNC).
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	1f\n"
	"	j	0f\n"
	"	.fill	12,1,0x07\n"
	"0:	basr	%r14,%r14\n"
	"1:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

/* Size of the single instruction written by ftrace_make_call (the l). */
#define FTRACE_INSN_SIZE	4
107 
108 #endif /* CONFIG_64BIT */
109 
110 
111 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
112 		    unsigned long addr)
113 {
114 	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
115 			       MCOUNT_INSN_SIZE))
116 		return -EPERM;
117 	return 0;
118 }
119 
120 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
121 {
122 	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
123 			       FTRACE_INSN_SIZE))
124 		return -EPERM;
125 	return 0;
126 }
127 
/*
 * Nothing to patch when the tracer function changes: the enabled code
 * template loads the tracer address from the __LC_FTRACE_FUNC lowcore
 * field at call time (see ftrace_enable_insn above), so no code
 * modification is required here.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}
132 
/* No architecture-specific setup is needed for dynamic ftrace on s390. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
137 
138 #endif /* CONFIG_DYNAMIC_FTRACE */
139 
140 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * @parent: the traced function's saved return address
 * @ip:     address inside the traced function's mcount block
 *
 * If the function graph tracer accepts this entry, the return address is
 * redirected to return_to_handler and that address is returned; otherwise
 * the original parent is returned unchanged.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	/* Graph tracing is paused for this task - leave return path alone. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	/* Strip PSW address-mode bits and rewind to the mcount block start. */
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	/* -EBUSY: per-task return stack is full; skip this entry. */
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
164 
165 #ifdef CONFIG_DYNAMIC_FTRACE
166 /*
167  * Patch the kernel code at ftrace_graph_caller location. The instruction
168  * there is branch relative and save to prepare_ftrace_return. To disable
169  * the call to prepare_ftrace_return we patch the bras offset to point
170  * directly after the instructions. To enable the call we calculate
171  * the original offset to prepare_ftrace_return and put it back.
172  */
173 int ftrace_enable_ftrace_graph_caller(void)
174 {
175 	unsigned short offset;
176 
177 	offset = ((void *) prepare_ftrace_return -
178 		  (void *) ftrace_graph_caller) / 2;
179 	return probe_kernel_write((void *) ftrace_graph_caller + 2,
180 				  &offset, sizeof(offset));
181 }
182 
183 int ftrace_disable_ftrace_graph_caller(void)
184 {
185 	static unsigned short offset = 0x0002;
186 
187 	return probe_kernel_write((void *) ftrace_graph_caller + 2,
188 				  &offset, sizeof(offset));
189 }
190 
191 #endif /* CONFIG_DYNAMIC_FTRACE */
192 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
193