xref: /openbmc/linux/arch/csky/kernel/ftrace.c (revision 6644c654)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define NOP		0x4000
#define NOP32_HI	0xc400
#define NOP32_LO	0x4820
#define PUSH_LR		0x14d0
#define MOVIH_LINK	0xea3a
#define ORI_LINK	0xef5a
#define JSR_LINK	0xe8fa
#define BSR_LINK	0xe000

/*
 * Gcc-csky with -pg will insert a stub in the function prologue:
 *	push	lr
 *	jbsr	_mcount
 *	nop32
 *	nop32
 *
 * If (callee - current_pc) is less than 64MB, we'll use bsr:
 *	push	lr
 *	bsr	_mcount
 *	nop32
 *	nop32
 * else we'll use (movih + ori + jsr):
 *	push	lr
 *	movih	r26, ...
 *	ori	r26, ...
 *	jsr	r26
 *
 * (r26 is our reserved link-reg)
 *
 */
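/*
 * Worked example (illustrative values, not taken from a real build): for
 * a callee 0x2000 bytes ahead of pc, make_jbsr() below stores the
 * halfword offset 0x2000 >> 1 = 0x1000, so the patched halfwords become
 * 0xe000 (BSR_LINK, upper 10 offset bits all zero), 0x1000 (low 16
 * offset bits), followed by two nop32s (0xc400 0x4820) padding the stub.
 */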
static inline void make_jbsr(unsigned long callee, unsigned long pc,
			     uint16_t *call, bool nolr)
{
	long offset;

	call[0]	= nolr ? NOP : PUSH_LR;

	offset = (long) callee - (long) pc;

	if (unlikely(offset < -67108864 || offset > 67108864)) {
		call[1] = MOVIH_LINK;
		call[2] = callee >> 16;
		call[3] = ORI_LINK;
		call[4] = callee & 0xffff;
		call[5] = JSR_LINK;
		call[6] = 0;
	} else {
		offset = offset >> 1;

		call[1] = BSR_LINK |
			 ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
		call[2] = (uint16_t)((unsigned long) offset & 0xffff);
		call[3] = call[5] = NOP32_HI;
		call[4] = call[6] = NOP32_LO;
	}
}

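/*
 * The disabled (all-nop) form of the 14-byte stub: a 16-bit nop in the
 * push-lr slot followed by three 32-bit nops.
 */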
static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
				NOP32_HI, NOP32_LO};
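
/*
 * hook points just past the 2-byte push-lr slot, so hook - 2 rewinds to
 * the start of the stub and the full sizeof(nops) bytes can be read back
 * and compared in one go.
 */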
static int ftrace_check_current_nop(unsigned long hook)
{
	uint16_t olds[7];
	unsigned long hook_pos = hook - 2;

	if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
			sizeof(nops)))
		return -EFAULT;

	if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
		pr_err("%p: expected nop but got (%04x %04x %04x %04x %04x %04x %04x)\n",
			(void *)hook_pos,
			olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
			olds[6]);

		return -EINVAL;
	}

	return 0;
}

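/*
 * Rewrite the 14-byte stub at hook - 2: either the live call sequence
 * built by make_jbsr() (enable) or the all-nop pattern, then flush the
 * icache over the patched range.
 */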
static int ftrace_modify_code(unsigned long hook, unsigned long target,
			      bool enable, bool nolr)
{
	uint16_t call[7];

	unsigned long hook_pos = hook - 2;
	int ret = 0;

	make_jbsr(target, hook, call, nolr);

	ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
				 sizeof(nops));
	if (ret)
		return -EPERM;

	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);

	return 0;
}

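/*
 * Only install the call if the site still holds the expected nop
 * pattern; otherwise ftrace_check_current_nop() reports -EINVAL.
 */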
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_nop(rec->ip);

	if (ret)
		return ret;

	return ftrace_modify_code(rec->ip, addr, true, false);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	return ftrace_modify_code(rec->ip, addr, false, false);
}

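/*
 * ftrace_call and ftrace_regs_call are patched with nolr set, so the
 * first halfword of their stubs stays a plain nop rather than push lr.
 */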
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = ftrace_modify_code((unsigned long)&ftrace_call,
				(unsigned long)func, true, true);
	if (!ret)
		ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
				(unsigned long)func, true, true);
	return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return ftrace_modify_code(rec->ip, addr, true, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
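/*
 * Redirect the saved return address to return_to_handler so the graph
 * tracer regains control when the traced function returns.
 */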
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr,
			*(unsigned long *)frame_pointer, parent)) {
		/*
		 * For a csky-gcc function that has sub-calls:
		 * subi	sp,	sp, 8
		 * stw	r8,	(sp, 0)
		 * mov	r8,	sp
		 * st.w r15,	(sp, 0x4)
		 * push	r15
		 * jl	_mcount
		 * We only need to set *parent for resume
		 *
		 * For a csky-gcc function with no sub-calls:
		 * subi	sp,	sp, 4
		 * stw	r8,	(sp, 0)
		 * mov	r8,	sp
		 * push	r15
		 * jl	_mcount
		 * We need to set both *parent and *(frame_pointer + 4) for
		 * resume, because lr is restored twice.
		 */
		*parent = return_hooker;
		frame_pointer += 4;
		if (*(unsigned long *)frame_pointer == old)
			*(unsigned long *)frame_pointer = return_hooker;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call,
			(unsigned long)&ftrace_graph_caller, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call,
			(unsigned long)&ftrace_graph_caller, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_CPU_HAS_ICACHE_INS
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

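/*
 * Without CONFIG_CPU_HAS_ICACHE_INS every CPU has to invalidate its own
 * icache, so patching is done under stop_machine(): the first CPU to
 * arrive rewrites the code, the others spin until it has finished and
 * then run local_icache_inv_all().
 */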
static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == 1) {
		ftrace_modify_all_code(param->command);
		atomic_inc(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
		local_icache_inv_all(NULL);
	}

	return 0;
}

void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif /* !CONFIG_CPU_HAS_ICACHE_INS */
#endif /* CONFIG_DYNAMIC_FTRACE */

/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);