// SPDX-License-Identifier: GPL-2.0
/*
 * fprobe - Simple ftrace probe wrapper for function entry.
 */
#define pr_fmt(fmt) "fprobe: " fmt

#include <linux/err.h>
#include <linux/fprobe.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"

/* Per-call data passed from the entry handler to the exit handler via rethook. */
struct fprobe_rethook_node {
	struct rethook_node node;
	unsigned long entry_ip;
	unsigned long entry_parent_ip;
	char data[];
};

static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct fprobe_rethook_node *fpr;
	struct rethook_node *rh = NULL;
	struct fprobe *fp;
	void *entry_data = NULL;
	int ret = 0;

	fp = container_of(ops, struct fprobe, ops);

	if (fp->exit_handler) {
		rh = rethook_try_get(fp->rethook);
		if (!rh) {
			fp->nmissed++;
			return;
		}
		fpr = container_of(rh, struct fprobe_rethook_node, node);
		fpr->entry_ip = ip;
		fpr->entry_parent_ip = parent_ip;
		if (fp->entry_data_size)
			entry_data = fpr->data;
	}

	if (fp->entry_handler)
		ret = fp->entry_handler(fp, ip, parent_ip, ftrace_get_regs(fregs), entry_data);

	/* If entry_handler returns !0, nmissed is not counted. */
	if (rh) {
		if (ret)
			rethook_recycle(rh);
		else
			rethook_hook(rh, ftrace_get_regs(fregs), true);
	}
}

static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct fprobe *fp;
	int bit;

	fp = container_of(ops, struct fprobe, ops);
	if (fprobe_disabled(fp))
		return;

	/* recursion detection has to go before any traceable function and
	 * all functions called before this point should be marked as notrace
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0) {
		fp->nmissed++;
		return;
	}
	__fprobe_handler(ip, parent_ip, ops, fregs);
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(fprobe_handler);

static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct fprobe *fp;
	int bit;

	fp = container_of(ops, struct fprobe, ops);
	if (fprobe_disabled(fp))
		return;

	/* recursion detection has to go before any traceable function and
	 * all functions called before this point should be marked as notrace
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0) {
		fp->nmissed++;
		return;
	}

	/*
	 * This user handler is shared with other kprobes and is not expected to be
	 * called recursively. So if any other kprobe handler is running, this will
	 * exit as kprobe does. See the section 'Share the callbacks with kprobes'
	 * in Documentation/trace/fprobe.rst for more information.
	 */
	if (unlikely(kprobe_running())) {
		fp->nmissed++;
		goto recursion_unlock;
	}

	kprobe_busy_begin();
	__fprobe_handler(ip, parent_ip, ops, fregs);
	kprobe_busy_end();

recursion_unlock:
	ftrace_test_recursion_unlock(bit);
}

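/*
 * Illustrative sketch (not part of this file's logic): how a user would opt
 * into the kprobe-sharing path handled by fprobe_kprobe_handler() above.
 * The handler name is hypothetical; FPROBE_FL_KPROBE_SHARED is assumed to be
 * the sharing flag declared in <linux/fprobe.h>.
 */
#if 0
static struct fprobe shared_fprobe = {
	.flags		= FPROBE_FL_KPROBE_SHARED,
	.entry_handler	= shared_entry_handler,	/* hypothetical callback */
};

/* register_fprobe() then selects fprobe_kprobe_handler() in fprobe_init(). */
#endif
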
static void fprobe_exit_handler(struct rethook_node *rh, void *data,
				unsigned long ret_ip, struct pt_regs *regs)
{
	struct fprobe *fp = (struct fprobe *)data;
	struct fprobe_rethook_node *fpr;
	int bit;

	if (!fp || fprobe_disabled(fp))
		return;

	fpr = container_of(rh, struct fprobe_rethook_node, node);

	/*
	 * We need to ensure there are no calls to traceable functions between
	 * the end of fprobe_handler and the beginning of fprobe_exit_handler.
	 */
	bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip);
	if (bit < 0) {
		fp->nmissed++;
		return;
	}

	fp->exit_handler(fp, fpr->entry_ip, ret_ip, regs,
			 fp->entry_data_size ? (void *)fpr->data : NULL);
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(fprobe_exit_handler);

static int symbols_cmp(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

/* Convert symbols to ftrace location addresses */
static unsigned long *get_ftrace_locations(const char **syms, int num)
{
	unsigned long *addrs;

	/* Convert symbols to symbol address */
	addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return ERR_PTR(-ENOMEM);

	/* ftrace_lookup_symbols expects sorted symbols */
	sort(syms, num, sizeof(*syms), symbols_cmp, NULL);

	if (!ftrace_lookup_symbols(syms, num, addrs))
		return addrs;

	kfree(addrs);
	return ERR_PTR(-ENOENT);
}

static void fprobe_init(struct fprobe *fp)
{
	fp->nmissed = 0;
	if (fprobe_shared_with_kprobes(fp))
		fp->ops.func = fprobe_kprobe_handler;
	else
		fp->ops.func = fprobe_handler;
	fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
}

static int fprobe_init_rethook(struct fprobe *fp, int num)
{
	int i, size;

	if (num <= 0)
		return -EINVAL;

	if (!fp->exit_handler) {
		fp->rethook = NULL;
		return 0;
	}

	/*
	 * Initialize rethook if needed. If the user does not specify
	 * nr_maxactive, reserve 2 nodes per possible CPU for each probed
	 * function as the default pool size.
	 */
	if (fp->nr_maxactive)
		size = fp->nr_maxactive;
	else
		size = num * num_possible_cpus() * 2;
	if (size <= 0)
		return -EINVAL;

	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
	if (!fp->rethook)
		return -ENOMEM;
	for (i = 0; i < size; i++) {
		struct fprobe_rethook_node *node;

		node = kzalloc(sizeof(*node) + fp->entry_data_size, GFP_KERNEL);
		if (!node) {
			rethook_free(fp->rethook);
			fp->rethook = NULL;
			return -ENOMEM;
		}
		rethook_add_node(fp->rethook, &node->node);
	}
	return 0;
}

static void fprobe_fail_cleanup(struct fprobe *fp)
{
	if (fp->rethook) {
		/* Don't need to cleanup rethook->handler because this is not used. */
		rethook_free(fp->rethook);
		fp->rethook = NULL;
	}
	ftrace_free_filter(&fp->ops);
}

/**
 * register_fprobe() - Register fprobe to ftrace by pattern.
 * @fp: A fprobe data structure to be registered.
 * @filter: A wildcard pattern of probed symbols.
 * @notfilter: A wildcard pattern of NOT probed symbols.
 *
 * Register @fp to ftrace for enabling the probe on the symbols matching @filter.
 * If @notfilter is not NULL, the symbols matching @notfilter are not probed.
 *
 * Return 0 if @fp is registered successfully, -errno if not.
 */
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
	struct ftrace_hash *hash;
	unsigned char *str;
	int ret, len;

	if (!fp || !filter)
		return -EINVAL;

	fprobe_init(fp);

	/* ftrace_set_filter() may modify the buffer, so pass a writable copy. */
	len = strlen(filter);
	str = kstrdup(filter, GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	ret = ftrace_set_filter(&fp->ops, str, len, 0);
	kfree(str);
	if (ret)
		return ret;

	if (notfilter) {
		len = strlen(notfilter);
		str = kstrdup(notfilter, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ftrace_set_notrace(&fp->ops, str, len, 0);
		kfree(str);
		if (ret)
			goto out;
	}

	/* TODO:
	 * correctly calculate the total number of filtered symbols
	 * from both filter and notfilter.
	 */
	hash = rcu_access_pointer(fp->ops.local_hash.filter_hash);
	if (WARN_ON_ONCE(!hash)) {
		ret = -EINVAL;
		goto out;
	}

	ret = fprobe_init_rethook(fp, (int)hash->count);
	if (!ret)
		ret = register_ftrace_function(&fp->ops);

out:
	if (ret)
		fprobe_fail_cleanup(fp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe);

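/*
 * Illustrative usage sketch for register_fprobe(): probe entry and exit of
 * every symbol matching a wildcard, carrying per-call private data from the
 * entry handler to the exit handler.  The handler names, the "vfs_read*"
 * pattern, and struct sample_ctx are hypothetical; the handler signatures
 * mirror the call sites in __fprobe_handler() and fprobe_exit_handler()
 * above.
 */
#if 0
struct sample_ctx {
	u64 entry_time;
};

static int sample_entry(struct fprobe *fp, unsigned long ip,
			unsigned long parent_ip, struct pt_regs *regs,
			void *entry_data)
{
	struct sample_ctx *ctx = entry_data;

	ctx->entry_time = ktime_get_ns();
	return 0;	/* !0 would skip the exit handler for this call */
}

static void sample_exit(struct fprobe *fp, unsigned long entry_ip,
			unsigned long ret_ip, struct pt_regs *regs,
			void *entry_data)
{
	struct sample_ctx *ctx = entry_data;

	pr_info("%pS took %llu ns\n", (void *)entry_ip,
		ktime_get_ns() - ctx->entry_time);
}

static struct fprobe sample_fprobe = {
	.entry_handler		= sample_entry,
	.exit_handler		= sample_exit,
	.entry_data_size	= sizeof(struct sample_ctx),
};

static int __init sample_init(void)
{
	/* Probe "vfs_read*" but skip "vfs_readlink". */
	return register_fprobe(&sample_fprobe, "vfs_read*", "vfs_readlink");
}
#endif
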
/**
 * register_fprobe_ips() - Register fprobe to ftrace by address.
 * @fp: A fprobe data structure to be registered.
 * @addrs: An array of target ftrace location addresses.
 * @num: The number of entries of @addrs.
 *
 * Register @fp to ftrace for enabling the probe on the addresses given by @addrs.
 * The @addrs must be ftrace location addresses, which may be the symbol
 * address + an arch-dependent offset.
 * If you are unsure what this means, please use the other registration functions.
 *
 * Return 0 if @fp is registered successfully, -errno if not.
 */
int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
{
	int ret;

	if (!fp || !addrs || num <= 0)
		return -EINVAL;

	fprobe_init(fp);

	ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
	if (ret)
		return ret;

	ret = fprobe_init_rethook(fp, num);
	if (!ret)
		ret = register_ftrace_function(&fp->ops);

	if (ret)
		fprobe_fail_cleanup(fp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe_ips);

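/*
 * Illustrative sketch for register_fprobe_ips(): the caller already holds
 * ftrace location addresses (e.g. resolved via ftrace_location() or an
 * attach path that supplies them).  The address array and sample_fprobe are
 * placeholders carried over from the sketch above.
 */
#if 0
static int attach_by_ips(unsigned long *addrs, int num)
{
	return register_fprobe_ips(&sample_fprobe, addrs, num);
}
#endif
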
/**
 * register_fprobe_syms() - Register fprobe to ftrace by symbols.
 * @fp: A fprobe data structure to be registered.
 * @syms: An array of target symbols.
 * @num: The number of entries of @syms.
 *
 * Register @fp to the symbols given by the @syms array. This will be useful if
 * you are sure the symbols exist in the kernel.
 *
 * Return 0 if @fp is registered successfully, -errno if not.
 */
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
{
	unsigned long *addrs;
	int ret;

	if (!fp || !syms || num <= 0)
		return -EINVAL;

	addrs = get_ftrace_locations(syms, num);
	if (IS_ERR(addrs))
		return PTR_ERR(addrs);

	ret = register_fprobe_ips(fp, addrs, num);

	kfree(addrs);

	return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe_syms);

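/*
 * Illustrative sketch for register_fprobe_syms(): register by symbol name
 * when the symbols are known to exist.  The symbol list and sample_fprobe
 * are placeholders from the sketches above.
 */
#if 0
static const char *sample_syms[] = { "kernel_clone", "schedule_timeout" };

static int attach_by_syms(void)
{
	return register_fprobe_syms(&sample_fprobe, sample_syms,
				    ARRAY_SIZE(sample_syms));
}
#endif
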
bool fprobe_is_registered(struct fprobe *fp)
{
	if (!fp || (fp->ops.saved_func != fprobe_handler &&
		    fp->ops.saved_func != fprobe_kprobe_handler))
		return false;
	return true;
}

/**
 * unregister_fprobe() - Unregister fprobe from ftrace
 * @fp: A fprobe data structure to be unregistered.
 *
 * Unregister fprobe (and remove ftrace hooks from the function entries).
 *
 * Return 0 if @fp is unregistered successfully, -errno if not.
 */
int unregister_fprobe(struct fprobe *fp)
{
	int ret;

	if (!fprobe_is_registered(fp))
		return -EINVAL;

	/* Stop hooking new function returns before removing the ftrace hooks. */
	if (fp->rethook)
		rethook_stop(fp->rethook);

	ret = unregister_ftrace_function(&fp->ops);
	if (ret < 0)
		return ret;

	if (fp->rethook)
		rethook_free(fp->rethook);

	ftrace_free_filter(&fp->ops);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_fprobe);

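/*
 * Illustrative teardown sketch: a registered fprobe is detached with
 * unregister_fprobe(), which removes the ftrace hooks and releases the
 * rethook pool.  sample_fprobe is the placeholder from the sketches above.
 */
#if 0
static void __exit sample_cleanup(void)
{
	unregister_fprobe(&sample_fprobe);
}
#endif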