xref: /openbmc/linux/kernel/trace/fprobe.c (revision a957cbc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fprobe - Simple ftrace probe wrapper for function entry.
4  */
5 #define pr_fmt(fmt) "fprobe: " fmt
6 
7 #include <linux/err.h>
8 #include <linux/fprobe.h>
9 #include <linux/kallsyms.h>
10 #include <linux/kprobes.h>
11 #include <linux/rethook.h>
12 #include <linux/slab.h>
13 #include <linux/sort.h>
14 
15 #include "trace.h"
16 
/*
 * Per-invocation bookkeeping for the exit (return) probe.  Nodes are
 * pre-allocated in fprobe_init_rethook() and recycled through the rethook
 * object pool; one node is in flight per hooked, not-yet-returned call.
 */
struct fprobe_rethook_node {
	/* Embedded rethook node; container_of() recovers this struct. */
	struct rethook_node node;
	/* Function entry address, reported to the exit handler. */
	unsigned long entry_ip;
	/* Caller address at entry; re-used for the recursion lock on exit. */
	unsigned long entry_parent_ip;
	/* fp->entry_data_size bytes shared between entry and exit handlers. */
	char data[];
};
23 
/*
 * Common fprobe entry work: optionally reserve a rethook node for the exit
 * handler, invoke the user's entry handler, then either arm the return hook
 * or recycle the node if the entry handler asked to skip this call.
 * Caller (fprobe_handler/fprobe_kprobe_handler) holds the recursion lock.
 */
static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct fprobe_rethook_node *fpr;
	struct rethook_node *rh = NULL;
	struct fprobe *fp;
	void *entry_data = NULL;
	int ret = 0;

	fp = container_of(ops, struct fprobe, ops);

	if (fp->exit_handler) {
		rh = rethook_try_get(fp->rethook);
		if (!rh) {
			/* Node pool exhausted (or rethook dying): count a miss. */
			fp->nmissed++;
			return;
		}
		fpr = container_of(rh, struct fprobe_rethook_node, node);
		fpr->entry_ip = ip;
		fpr->entry_parent_ip = parent_ip;
		if (fp->entry_data_size)
			entry_data = fpr->data;
	}

	if (fp->entry_handler)
		ret = fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data);

	/* If entry_handler returns !0, nmissed is not counted. */
	if (rh) {
		if (ret)
			/* Entry handler rejected this call: return the node unused. */
			rethook_recycle(rh);
		else
			/* Arm the return hook so fprobe_exit_handler() fires. */
			rethook_hook(rh, ftrace_get_regs(fregs), true);
	}
}
59 
60 static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
61 		struct ftrace_ops *ops, struct ftrace_regs *fregs)
62 {
63 	struct fprobe *fp;
64 	int bit;
65 
66 	fp = container_of(ops, struct fprobe, ops);
67 	if (fprobe_disabled(fp))
68 		return;
69 
70 	/* recursion detection has to go before any traceable function and
71 	 * all functions before this point should be marked as notrace
72 	 */
73 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
74 	if (bit < 0) {
75 		fp->nmissed++;
76 		return;
77 	}
78 	__fprobe_handler(ip, parent_ip, ops, fregs);
79 	ftrace_test_recursion_unlock(bit);
80 
81 }
82 NOKPROBE_SYMBOL(fprobe_handler);
83 
84 static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
85 				  struct ftrace_ops *ops, struct ftrace_regs *fregs)
86 {
87 	struct fprobe *fp;
88 	int bit;
89 
90 	fp = container_of(ops, struct fprobe, ops);
91 	if (fprobe_disabled(fp))
92 		return;
93 
94 	/* recursion detection has to go before any traceable function and
95 	 * all functions called before this point should be marked as notrace
96 	 */
97 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
98 	if (bit < 0) {
99 		fp->nmissed++;
100 		return;
101 	}
102 
103 	if (unlikely(kprobe_running())) {
104 		fp->nmissed++;
105 		return;
106 	}
107 
108 	kprobe_busy_begin();
109 	__fprobe_handler(ip, parent_ip, ops, fregs);
110 	kprobe_busy_end();
111 	ftrace_test_recursion_unlock(bit);
112 }
113 
/*
 * Rethook callback fired on function return: re-takes the recursion lock
 * (using the addresses saved at entry) and invokes the user's exit handler.
 * @data is the fprobe pointer passed to rethook_alloc().
 */
static void fprobe_exit_handler(struct rethook_node *rh, void *data,
				struct pt_regs *regs)
{
	struct fprobe *fp = (struct fprobe *)data;
	struct fprobe_rethook_node *fpr;
	int bit;

	/* The fprobe may have been disabled between entry and return. */
	if (!fp || fprobe_disabled(fp))
		return;

	fpr = container_of(rh, struct fprobe_rethook_node, node);

	/*
	 * we need to assure no calls to traceable functions in-between the
	 * end of fprobe_handler and the beginning of fprobe_exit_handler.
	 */
	bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip);
	if (bit < 0) {
		fp->nmissed++;
		return;
	}

	/* Per-call scratch data is only passed if the user requested space. */
	fp->exit_handler(fp, fpr->entry_ip, regs,
			 fp->entry_data_size ? (void *)fpr->data : NULL);
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(fprobe_exit_handler);
141 
/* sort() comparator: orders symbol-name strings lexicographically. */
static int symbols_cmp(const void *a, const void *b)
{
	const char * const *sym_a = a;
	const char * const *sym_b = b;

	return strcmp(*sym_a, *sym_b);
}
149 
/*
 * Convert ftrace location address from symbols.
 *
 * Returns a kcalloc'd array of @num addresses (caller must kfree), or an
 * ERR_PTR on failure.  NOTE: sorts @syms in place, since
 * ftrace_lookup_symbols() requires a sorted input array.
 */
static unsigned long *get_ftrace_locations(const char **syms, int num)
{
	unsigned long *addrs;

	/* Convert symbols to symbol address */
	addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return ERR_PTR(-ENOMEM);

	/* ftrace_lookup_symbols expects sorted symbols */
	sort(syms, num, sizeof(*syms), symbols_cmp, NULL);

	if (!ftrace_lookup_symbols(syms, num, addrs))
		return addrs;

	kfree(addrs);
	return ERR_PTR(-ENOENT);
}
169 
170 static void fprobe_init(struct fprobe *fp)
171 {
172 	fp->nmissed = 0;
173 	if (fprobe_shared_with_kprobes(fp))
174 		fp->ops.func = fprobe_kprobe_handler;
175 	else
176 		fp->ops.func = fprobe_handler;
177 	fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
178 }
179 
180 static int fprobe_init_rethook(struct fprobe *fp, int num)
181 {
182 	int i, size;
183 
184 	if (num < 0)
185 		return -EINVAL;
186 
187 	if (!fp->exit_handler) {
188 		fp->rethook = NULL;
189 		return 0;
190 	}
191 
192 	/* Initialize rethook if needed */
193 	if (fp->nr_maxactive)
194 		size = fp->nr_maxactive;
195 	else
196 		size = num * num_possible_cpus() * 2;
197 	if (size < 0)
198 		return -E2BIG;
199 
200 	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
201 	if (!fp->rethook)
202 		return -ENOMEM;
203 	for (i = 0; i < size; i++) {
204 		struct fprobe_rethook_node *node;
205 
206 		node = kzalloc(sizeof(*node) + fp->entry_data_size, GFP_KERNEL);
207 		if (!node) {
208 			rethook_free(fp->rethook);
209 			fp->rethook = NULL;
210 			return -ENOMEM;
211 		}
212 		rethook_add_node(fp->rethook, &node->node);
213 	}
214 	return 0;
215 }
216 
217 static void fprobe_fail_cleanup(struct fprobe *fp)
218 {
219 	if (fp->rethook) {
220 		/* Don't need to cleanup rethook->handler because this is not used. */
221 		rethook_free(fp->rethook);
222 		fp->rethook = NULL;
223 	}
224 	ftrace_free_filter(&fp->ops);
225 }
226 
227 /**
228  * register_fprobe() - Register fprobe to ftrace by pattern.
229  * @fp: A fprobe data structure to be registered.
230  * @filter: A wildcard pattern of probed symbols.
231  * @notfilter: A wildcard pattern of NOT probed symbols.
232  *
233  * Register @fp to ftrace for enabling the probe on the symbols matched to @filter.
234  * If @notfilter is not NULL, the symbols matched the @notfilter are not probed.
235  *
236  * Return 0 if @fp is registered successfully, -errno if not.
237  */
238 int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
239 {
240 	struct ftrace_hash *hash;
241 	unsigned char *str;
242 	int ret, len;
243 
244 	if (!fp || !filter)
245 		return -EINVAL;
246 
247 	fprobe_init(fp);
248 
249 	len = strlen(filter);
250 	str = kstrdup(filter, GFP_KERNEL);
251 	ret = ftrace_set_filter(&fp->ops, str, len, 0);
252 	kfree(str);
253 	if (ret)
254 		return ret;
255 
256 	if (notfilter) {
257 		len = strlen(notfilter);
258 		str = kstrdup(notfilter, GFP_KERNEL);
259 		ret = ftrace_set_notrace(&fp->ops, str, len, 0);
260 		kfree(str);
261 		if (ret)
262 			goto out;
263 	}
264 
265 	/* TODO:
266 	 * correctly calculate the total number of filtered symbols
267 	 * from both filter and notfilter.
268 	 */
269 	hash = rcu_access_pointer(fp->ops.local_hash.filter_hash);
270 	if (WARN_ON_ONCE(!hash))
271 		goto out;
272 
273 	ret = fprobe_init_rethook(fp, (int)hash->count);
274 	if (!ret)
275 		ret = register_ftrace_function(&fp->ops);
276 
277 out:
278 	if (ret)
279 		fprobe_fail_cleanup(fp);
280 	return ret;
281 }
282 EXPORT_SYMBOL_GPL(register_fprobe);
283 
284 /**
285  * register_fprobe_ips() - Register fprobe to ftrace by address.
286  * @fp: A fprobe data structure to be registered.
287  * @addrs: An array of target ftrace location addresses.
288  * @num: The number of entries of @addrs.
289  *
290  * Register @fp to ftrace for enabling the probe on the address given by @addrs.
291  * The @addrs must be the addresses of ftrace location address, which may be
292  * the symbol address + arch-dependent offset.
293  * If you unsure what this mean, please use other registration functions.
294  *
295  * Return 0 if @fp is registered successfully, -errno if not.
296  */
297 int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
298 {
299 	int ret;
300 
301 	if (!fp || !addrs || num <= 0)
302 		return -EINVAL;
303 
304 	fprobe_init(fp);
305 
306 	ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
307 	if (ret)
308 		return ret;
309 
310 	ret = fprobe_init_rethook(fp, num);
311 	if (!ret)
312 		ret = register_ftrace_function(&fp->ops);
313 
314 	if (ret)
315 		fprobe_fail_cleanup(fp);
316 	return ret;
317 }
318 EXPORT_SYMBOL_GPL(register_fprobe_ips);
319 
320 /**
321  * register_fprobe_syms() - Register fprobe to ftrace by symbols.
322  * @fp: A fprobe data structure to be registered.
323  * @syms: An array of target symbols.
324  * @num: The number of entries of @syms.
325  *
326  * Register @fp to the symbols given by @syms array. This will be useful if
327  * you are sure the symbols exist in the kernel.
328  *
329  * Return 0 if @fp is registered successfully, -errno if not.
330  */
331 int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
332 {
333 	unsigned long *addrs;
334 	int ret;
335 
336 	if (!fp || !syms || num <= 0)
337 		return -EINVAL;
338 
339 	addrs = get_ftrace_locations(syms, num);
340 	if (IS_ERR(addrs))
341 		return PTR_ERR(addrs);
342 
343 	ret = register_fprobe_ips(fp, addrs, num);
344 
345 	kfree(addrs);
346 
347 	return ret;
348 }
349 EXPORT_SYMBOL_GPL(register_fprobe_syms);
350 
/**
 * unregister_fprobe() - Unregister fprobe from ftrace
 * @fp: A fprobe data structure to be unregistered.
 *
 * Unregister fprobe (and remove ftrace hooks from the function entries).
 *
 * Return 0 if @fp is unregistered successfully, -errno if not.
 */
int unregister_fprobe(struct fprobe *fp)
{
	int ret;

	/*
	 * saved_func still holds the handler fprobe_init() installed even
	 * after ftrace wraps ops.func, so it identifies a registered fprobe.
	 */
	if (!fp || (fp->ops.saved_func != fprobe_handler &&
		    fp->ops.saved_func != fprobe_kprobe_handler))
		return -EINVAL;

	/*
	 * rethook_free() starts disabling the rethook, but the rethook handlers
	 * may be running on other processors at this point. To make sure that all
	 * current running handlers are finished, call unregister_ftrace_function()
	 * after this.
	 */
	if (fp->rethook)
		rethook_free(fp->rethook);

	ret = unregister_ftrace_function(&fp->ops);
	if (ret < 0)
		return ret;

	/* Safe to drop filter hashes only after the ops is fully detached. */
	ftrace_free_filter(&fp->ops);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_fprobe);
385