/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/*
 * Kprobe event core functions
 */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct ftrace_event_file * __rcu *files;
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
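
/*
 * Illustration (not from the original source): args[] is a flexible
 * array member, so a single allocation covers the header plus all probe
 * arguments, e.g. for two arguments:
 *
 *	tp = kzalloc(SIZEOF_TRACE_PROBE(2), GFP_KERNEL);
 *	   == kzalloc(offsetof(struct trace_probe, args)
 *		      + 2 * sizeof(struct probe_arg), GFP_KERNEL);
 */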
static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
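
/*
 * Example (illustrative): for a probe on symbol "ext4:ext4_file_open",
 * trace_probe_within_module() matches a module with mod->name "ext4",
 * since the first four characters match and name[4] == ':'.
 */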

static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

static int trace_probe_nr_files(struct trace_probe *tp)
{
	struct ftrace_event_file **file;
	int ret = 0;

	/*
	 * Since all updaters of tp->files are protected by
	 * probe_enable_lock, we don't need to take rcu_read_lock() here.
	 */
	file = rcu_dereference_raw(tp->files);
	if (file)
		while (*(file++))
			ret++;

	return ret;
}

static DEFINE_MUTEX(probe_enable_lock);

/*
 * Enable trace_probe.
 * If file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old;
		int n = trace_probe_nr_files(tp);

		old = rcu_dereference_raw(tp->files);
		/* 1 for the new entry and 1 for the NULL stopper */
		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
			      GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
		new[n] = file;
		/* The last slot is left NULL as the stopper */

		rcu_assign_pointer(tp->files, new);
		tp->flags |= TP_FLAG_TRACE;

		if (old) {
			/* Make sure the probe is done with old files */
			synchronize_sched();
			kfree(old);
		}
	} else
		tp->flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

 out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}
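
/*
 * Usage sketch (illustrative, not part of the original file): the "trace"
 * path above is taken when the event is enabled through its event
 * directory, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
 *
 * while the "perf" path (file == NULL) is taken when perf attaches to
 * the event.
 */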

static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{
	struct ftrace_event_file **files;
	int i;

	/*
	 * Since all updaters of tp->files are protected by
	 * probe_enable_lock, we don't need to take rcu_read_lock() here.
	 */
	files = rcu_dereference_raw(tp->files);
	if (files) {
		for (i = 0; files[i]; i++)
			if (files[i] == file)
				return i;
	}

	return -1;
}

/*
 * Disable trace_probe.
 * If file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old;
		int n = trace_probe_nr_files(tp);
		int i, j;

		old = rcu_dereference_raw(tp->files);
		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (n == 1) {	/* Remove the last file */
			tp->flags &= ~TP_FLAG_TRACE;
			new = NULL;
		} else {
			new = kzalloc(n * sizeof(struct ftrace_event_file *),
				      GFP_KERNEL);
			if (!new) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			/* This copy & check loop copies the NULL stopper too */
			for (i = 0, j = 0; j < n && i < n + 1; i++)
				if (old[i] != file)
					new[j++] = old[i];
		}

		rcu_assign_pointer(tp->files, new);

		/* Make sure the probe is done with old files */
		synchronize_sched();
		kfree(old);
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}

 out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
{
	int i, ret;

	if (trace_probe_is_registered(tp))
		return -EINVAL;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_update_arg(&tp->args[i]);

	/* Set/clear disabled flag according to tp->flags */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after "
				   "the target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this probe should have gone */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				pr_warning("Failed to re-register probe %s on "
					   "%s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
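	 *
	 * Examples (as in Documentation/trace/kprobetrace.txt; the register
	 * names are arch-specific and shown only for illustration):
	 *  p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *  r:myretprobe do_sys_open $retval
	 *  -:myprobe	: delete the probe again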
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}
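
/*
 * Example of a kprobe_events line emitted above (illustrative, assuming
 * a probe named "myprobe" in the default group):
 *
 *	p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx
 */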

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
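
/*
 * Each kprobe_profile line above shows the event name, the hit count
 * (nhit) and the kprobe missed count (rp.kp.nmissed), e.g. (illustrative):
 *
 *	  myprobe                                                  12               0
 */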

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/*
			 * Advance the end of the used area and reduce
			 * the remaining maximum length
			 */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
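
/*
 * Orientation note (summarizing the helpers from trace_probe.h): a data
 * (relative) location is a u32 that packs a length in the high 16 bits
 * and an offset in the low 16 bits. make_data_rloc(maxlen, roffs) above
 * seeds *dl with the space still available, and get_rloc_len(*dl) reads
 * back how many bytes the fetch actually stored.
 */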

/* Kprobe handler */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	/*
	 * Note: preempt is already disabled around the kprobe handler.
	 * However, we still need an smp_read_barrier_depends() corresponding
	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
	 */
	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);

	if (unlikely(!file))
		return;

	while (*file) {
		__kprobe_trace_func(tp, regs, *file);
		file++;
	}
}

/* Kretprobe handler */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	/*
	 * Note: preempt is already disabled around the kprobe handler.
	 * However, we still need an smp_read_barrier_depends() corresponding
	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
	 */
	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);

	if (unlikely(!file))
		return;

	while (*file) {
		__kretprobe_trace_func(tp, ri, regs, *file);
		file++;
	}
}

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}
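
/*
 * Illustration (assumed output, not taken from this file): for a kprobe
 * event with a single argument named "dfd", the resulting print_fmt looks
 * something like
 *
 *	"\"(%lx) dfd=%lx\", REC->__probe_ip, REC->dfd"
 *
 * where __probe_ip comes from FIELD_STRING_IP in trace_probe.h.
 */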

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ip, 1, regs, head, NULL);
}
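
/*
 * The ALIGN() arithmetic above keeps size plus the u32 record header
 * u64-aligned. Worked example (illustrative): __size == 22 gives
 * ALIGN(22 + 4, 8) == 32, so size == 28 and 28 + sizeof(u32) == 32,
 * a multiple of 8.
 */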

/* Kretprobe profile handler */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ret_ip, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */

static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}
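
/*
 * Note: register_ftrace_event() returns the assigned event type number,
 * so the "!ret" check above treats 0 (no type could be assigned) as the
 * failure case.
 */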

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif