xref: /openbmc/linux/kernel/trace/trace_uprobe.c (revision 293d5b43)
/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))
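
/*
 * A sketch of the resulting record layout (illustrative, not part of the
 * original source):
 *
 *   entry probe:  [trace_entry][vaddr[0] = probe IP]                    [args...]
 *   return probe: [trace_entry][vaddr[0] = func][vaddr[1] = return IP]  [args...]
 */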

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
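
/*
 * These back the "$stackN" fetch argument, which reads the Nth entry of
 * the user stack at probe time; e.g. (an illustrative command, not taken
 * from this file):
 *
 *   echo 'p:ev /bin/bash:0x4245c0 arg1=$stack2' >> uprobe_events
 */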

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
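
/*
 * These handlers back the "@+OFFSET" fetch syntax, which reads memory at
 * an offset relative to where the probed file is mapped; e.g. (an
 * illustrative command, not taken from this file):
 *
 *   echo 'p:ev /bin/bash:0x4245c0 name=@+0x6000:string' >> uprobe_events
 */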

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate a new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/*
 * Unregister a trace_uprobe and probe_event; must be called with
 * uprobe_lock held.
 */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete the old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
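/*
 * For example (illustrative names, paths and offsets, not taken from
 * this file):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 arg1=%ax' >> uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 ret=$retval' >> uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 */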
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("Probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe. (%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:", tu->filename);

	/* Don't print "0x  (null)" when offset is 0 */
	if (tu->offset) {
		seq_printf(m, "0x%p", (void *)tu->offset);
	} else {
		switch (sizeof(void *)) {
		case 4:
			seq_printf(m, "0x00000000");
			break;
		case 8:
		default:
			seq_printf(m, "0x0000000000000000");
			break;
		}
	}

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
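
/*
 * Example line as shown when reading 'uprobe_events' (illustrative):
 *
 *   p:uprobes/myprobe /bin/bash:0x00000000004245c0 arg1=%ax
 */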

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
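
/*
 * Example line as shown when reading 'uprobe_profile' (illustrative; the
 * columns are filename, event name and hit count):
 *
 *   /bin/bash myprobe                                          127
 */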

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
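
/*
 * Each possible CPU gets one page for marshalling fetched arguments;
 * concurrent hits that end up on the same buffer (e.g. after a migration)
 * serialize on the per-cpu mutex taken in uprobe_buffer_get() below.
 */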

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but the task might
	 * migrate, so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
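
/*
 * Example of the rendered trace output (illustrative values):
 *
 *   bash-1234  [002] ....  123.456789: myprobe: (0x4245c0) arg1=42
 *   bash-1234  [002] ....  123.456999: myretprobe: (0x4243e0 <- 0x4245c0) ret=0
 */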

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

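/*
 * The dispatchers below run from the breakpoint/return handler of the
 * probed task. The uprobe_dispatch_data stashed in current->utask->vaddr
 * is what translate_user_vaddr() above uses to resolve "@+OFFSET"
 * arguments against the current mapping.
 */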
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);