/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first checked
 * against @old.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text: a bug here could make us
	 * read from or write to memory where that could cause harm. So
	 * carefully read and modify the code with the aarch64_insn_*()
	 * helpers, which use probe_kernel_*(), and make sure what we read
	 * is what we expect it to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
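
/*
 * Illustrative sketch (added for exposition, not part of the kernel;
 * example_gen_bl() is a hypothetical helper): the two encodings this
 * file toggles between. The A64 NOP is the fixed word 0xd503201f, and
 * BL encodes a signed 26-bit word offset, giving a reach of +/-2^27
 * bytes = +/-128 MiB -- hence the SZ_128M range checks below. This is
 * roughly what aarch64_insn_gen_branch_imm() computes for
 * AARCH64_INSN_BRANCH_LINK (the real helper also validates the range):
 */
static u32 __maybe_unused example_gen_bl(unsigned long pc,
					 unsigned long target)
{
	long offset = (long)target - (long)pc;

	/* BL: bits [31:26] = 0b100101, imm26 = byte offset / 4 */
	return 0x94000000 | (((u32)(offset >> 2)) & 0x03ffffff);
}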

/*
 * Replace the tracer function called from the ftrace_call slot in
 * ftrace_caller().
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}
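
/*
 * For orientation (paraphrased from arch/arm64/kernel/entry-ftrace.S;
 * details may differ by kernel version): the slots patched by this file
 * live inside ftrace_caller(), roughly:
 *
 *	ftrace_caller:
 *		... save state, set up tracer arguments ...
 *	ftrace_call:		// patched by ftrace_update_ftrace_func()
 *		nop		// becomes 'bl <tracer>'
 *	ftrace_graph_call:	// patched by ftrace_modify_graph_caller()
 *		nop		// becomes 'b ftrace_graph_caller'
 *		... restore state and return ...
 *
 * Since the ftrace_call slot is under ftrace's exclusive control, the
 * update above safely skips validation (validate == false).
 */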

/*
 * Turn on the call to ftrace_caller() in an instrumented function.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		struct plt_entry trampoline;
		struct module *mod;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		/*
		 * There is only one ftrace trampoline per module. For now,
		 * this is not a problem since on arm64, all dynamic ftrace
		 * invocations are routed via ftrace_caller(). This will need
		 * to be revisited if support for multiple ftrace entry points
		 * is added in the future, but for now, the pr_err() below
		 * deals with a theoretical issue only.
		 *
		 * Note that PLTs are place-relative, and plt_entries_equal()
		 * checks whether they point to the same target. Here, we need
		 * to check whether the actual opcodes are identical,
		 * regardless of where the entry sits in memory, so use
		 * memcmp() instead.
		 */
		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
		if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
			   sizeof(trampoline))) {
			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
				return -EINVAL;
			}

			/* point the trampoline to our ftrace entry point */
			module_disable_ro(mod);
			*mod->arch.ftrace_trampoline = trampoline;
			module_enable_ro(mod, true);

			/* update trampoline before patching in the branch */
			smp_wmb();
		}
		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}
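
/*
 * Illustrative sketch (an assumption about this era's struct plt_entry
 * in arch/arm64/include/asm/module.h, added for exposition): the
 * per-module ftrace trampoline is a short stub along the lines of:
 *
 *	adrp	x16, <target page>	// PC-relative, +/-4 GiB reach
 *	add	x16, x16, #<page offset>
 *	br	x16
 *
 * so a 'bl' that can only reach +/-128 MiB gets to ftrace_caller() in
 * two hops: bl <trampoline in module>, then br x16 -> ftrace_caller().
 * The adrp being PC-relative is why the comment above calls PLTs
 * place-relative: the same target encodes differently at different
 * addresses.
 */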

/*
 * Turn off the call to ftrace_caller() in an instrumented function.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		u32 replaced;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a
		 * branch-and-link instruction that was redirected via a PLT
		 * entry. In that case, the normal validation will fail, but
		 * we can at least check that we are dealing with a
		 * branch-and-link instruction that points into the right
		 * module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}
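
/*
 * Illustrative sketch (added for exposition, not part of the kernel;
 * both helpers are hypothetical) of what the far-branch checks above
 * boil down to: aarch64_insn_is_bl() matches the BL opcode, and
 * aarch64_get_branch_offset() recovers the sign-extended byte offset
 * from the imm26 field. Conceptually:
 */
static bool __maybe_unused example_is_bl(u32 insn)
{
	/* BL: top six bits are 0b100101 */
	return (insn & 0xfc000000) == 0x94000000;
}

static long __maybe_unused example_bl_offset(u32 insn)
{
	/*
	 * Shift imm26 to the top, then arithmetic-shift back down:
	 * the result is sign_extend(imm26) * 4, i.e. the byte offset.
	 */
	return ((s32)(insn << 6)) >> 4;
}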

void arch_ftrace_update_code(int command)
{
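	/*
	 * FTRACE_MAY_SLEEP lets the core ftrace code reschedule between
	 * patching individual call sites. arm64 does not patch text via
	 * stop_machine() here, so sleeping is safe, and it avoids
	 * soft-lockup/RCU-stall warnings when a large number of call
	 * sites is updated in one go.
	 */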
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be
 * called on the way back to the parent. For this purpose, this function
 * is called in _mcount() or ftrace_caller() to replace the return
 * address (*parent) on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * Unlike on some other architectures, there is no protection
	 * against faulting at *parent; such a fault is unlikely on
	 * AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}
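
/*
 * Resulting call flow, for exposition (illustrative, for a traced
 * function foo()):
 *
 *	foo() -> ftrace_caller() -> prepare_ftrace_return()
 *		// foo()'s saved return address now points at
 *		// return_to_handler
 *	foo() returns -> return_to_handler() -> ftrace_return_to_handler()
 *		// reports the function exit and restores the real return
 *		// address recorded by function_graph_enter()
 */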

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}
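
/*
 * Unlike the ftrace_call update in ftrace_update_ftrace_func(), this
 * site is patched with validation enabled: ftrace_graph_call only ever
 * holds either the NOP or the branch generated above, so the expected
 * old instruction is always known exactly.
 */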

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */