// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
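
/*
 * Layout of a recorded event: the common trace_entry header, then one
 * vaddr slot (the probed instruction pointer) for entry probes or two
 * slots (function entry address and return IP) for return probes,
 * followed by the fetched argument data. SIZEOF_TRACE_ENTRY() and
 * DATAOF_TRACE_ENTRY() below encode this layout.
 */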
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	} else
		*(u32 *)dest = make_data_loc(0, (void *)dst - base);

	return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}
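
/*
 * Translate a file offset from the probe definition into a virtual
 * address in the current task's mapping, using the dispatch data that
 * the dispatchers stash in current->utask->vaddr before running the
 * fetch code (see uprobe_dispatcher() and uretprobe_dispatcher()).
 */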
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
			 (int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
			 (int)(sizeof(void *) * 2), tu->offset,
			 tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return (event[0] == '\0' ||
		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), then the same uprobe
 * may take a new reference counter as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
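/*
 * For example (paths, offsets and registers are purely illustrative;
 * %ax assumes x86):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ax'          >> uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval'   >> uprobe_events
 *   echo '-:myprobe'                                 >> uprobe_events
 *
 * See Documentation/trace/uprobetracer.rst for the full fetch-arg syntax.
 */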
static int __trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	enum probe_print_type ptype;
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;
	if (argc - 2 > MAX_TRACE_ARGS)
		return -E2BIG;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	}

	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		struct traceprobe_parse_context ctx = {
			.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
		};

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
		traceprobe_finish_parse(&ctx);
		if (ret)
			goto error;
	}

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_uprobe_ops);

	ret = trace_uprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
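
/*
 * Per-CPU scratch buffers used to assemble the fetched argument data
 * before it is copied into the trace ring buffer or the perf buffer.
 * Uprobe handlers run in task context and may sleep or migrate, so each
 * buffer is protected by a mutex rather than by disabling preemption.
 */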
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
	int dsize;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
#define MAX_UCB_BUFFER_SIZE PAGE_SIZE

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	if (!ucb)
		return;
	mutex_unlock(&ucb->mutex);
}
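
/*
 * Fill a per-CPU buffer with the probe's argument data for the current
 * hit. The result is cached via *ucbp so that when both the ftrace and
 * the perf handler fire for the same hit, the arguments are only
 * fetched once.
 */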
static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
						       struct pt_regs *regs,
						       struct uprobe_cpu_buffer **ucbp)
{
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	if (*ucbp)
		return *ucbp;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	dsize = __get_data_size(&tu->tp, regs, NULL);

	ucb = uprobe_buffer_get();
	ucb->dsize = tu->tp.size + dsize;

	if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
		ucb->dsize = MAX_UCB_BUFFER_SIZE;
		dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
	}

	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);

	*ucbp = ucb;
	return ucb;
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + ucb->dsize;
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	trace_event_buffer_commit(&fbuffer);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	if (is_ret_probe(tu))
		return 0;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
			      enum uprobe_filter_ctx ctx,
			      struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
					     tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}

static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
			       enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer **ucbp)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct uprobe_cpu_buffer *ucb;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

#ifdef CONFIG_BPF_EVENTS
	if (bpf_prog_array_valid(call)) {
		u32 ret;

		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
		if (!ret)
			return;
	}
#endif /* CONFIG_BPF_EVENTS */

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
	size = esize + ucb->dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	if (size - esize > ucb->dsize)
		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer **ucbp)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucbp);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer **ucbp)
{
	__uprobe_perf_func(tu, func, regs, ucbp);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	*probe_addr = 0;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
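
/*
 * Called by the uprobes core when a breakpoint hit is consumed. The
 * dispatch data is stashed in current->utask->vaddr so that the fetch
 * code can translate file-offset arguments (FETCH_OP_FOFFS), then the
 * hit is fanned out to the ftrace and/or perf handlers depending on
 * which flags are set on the trace_probe.
 */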
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	enum probe_print_type ptype;
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	if (!tu->filename) {
		ret = -ENOMEM;
		goto error;
	}

	init_trace_event_call(tu);

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);