/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
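/*
 * The two macros above describe the ring-buffer layout used by this file:
 * the common trace_entry header is followed by one vaddr (the probe hit
 * address) for a uprobe, or two vaddrs (probed function address and the
 * instruction pointer at return) for a uretprobe, and then the fetched
 * argument data.  SIZEOF_TRACE_ENTRY() gives that header size and
 * DATAOF_TRACE_ENTRY() points at the start of the argument area.
 */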

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};
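/*
 * The filter decides which mm's actually get the breakpoint when the event
 * is used from perf: nr_systemwide counts perf events that have no target
 * task (probe every mm), while perf_events links the per-task events so
 * that only their target mm's are patched (see uprobe_perf_filter() below).
 */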

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen  = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
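/*
 * Together with the common fetch helpers shared through trace_probe.h, the
 * stack, memory and file_offset families above back the FETCHARGS forms
 * accepted through uprobe_events (stack slots, user-space addresses and
 * file-offset based accesses); the table below maps the user-visible type
 * names onto these fetch functions.
 */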

/* Fetch type information table */
const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
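/*
 * For example (the offset below is made up), written to the uprobe_events
 * file in the tracing directory:
 *
 *   p:myprobe /bin/bash:0x4245c0	adds a uprobe (FETCHARGS optional)
 *   r:myretprobe /bin/bash:0x4245c0	adds a uretprobe at the same spot
 *   -:myprobe				removes the probe again
 */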
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			ftrace_event_name(&tu->tp.call));
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			ftrace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
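/*
 * The per-cpu buffers are reference counted: one page per possible CPU is
 * allocated when the first uprobe event is enabled and everything is freed
 * again when the last one is disabled, both under event_mutex.
 */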

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct ftrace_event_file *ftrace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct ftrace_event_call *call = &tu->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[1], entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		if (!trace_seq_printf(s, "%s: (0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.tp_target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.tp_target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct ftrace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			ftrace_event_name(call));
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);