1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)	"trace_kprobe: " fmt
9 
10 #include <linux/security.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/rculist.h>
14 #include <linux/error-injection.h>
15 
16 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
17 
18 #include "trace_dynevent.h"
19 #include "trace_kprobe_selftest.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22 
23 #define KPROBE_EVENT_SYSTEM "kprobes"
24 #define KRETPROBE_MAXACTIVE_MAX 4096
25 
26 /* Kprobe early definition from command line */
27 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
28 static bool kprobe_boot_events_enabled __initdata;
29 
30 static int __init set_kprobe_boot_events(char *str)
31 {
32 	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
33 	return 0;
34 }
35 __setup("kprobe_event=", set_kprobe_boot_events);
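
/*
 * A minimal usage sketch of the boot parameter above (the probed symbols are
 * illustrative, not mandated by this file): each definition uses the same
 * grammar as the kprobe_events tracefs file, but with ',' standing in for
 * spaces and ';' separating multiple definitions, e.g.
 *
 *	kprobe_event=p,vfs_read;r,vfs_read,$retval
 *
 * setup_boot_kprobe_events() below converts the commas back to spaces and
 * feeds each definition to create_or_delete_trace_kprobe().
 */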
36 
37 static int trace_kprobe_create(int argc, const char **argv);
38 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
39 static int trace_kprobe_release(struct dyn_event *ev);
40 static bool trace_kprobe_is_busy(struct dyn_event *ev);
41 static bool trace_kprobe_match(const char *system, const char *event,
42 			int argc, const char **argv, struct dyn_event *ev);
43 
44 static struct dyn_event_operations trace_kprobe_ops = {
45 	.create = trace_kprobe_create,
46 	.show = trace_kprobe_show,
47 	.is_busy = trace_kprobe_is_busy,
48 	.free = trace_kprobe_release,
49 	.match = trace_kprobe_match,
50 };
51 
52 /*
53  * Kprobe event core functions
54  */
55 struct trace_kprobe {
56 	struct dyn_event	devent;
57 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
58 	unsigned long __percpu *nhit;
59 	const char		*symbol;	/* symbol name */
60 	struct trace_probe	tp;
61 };
62 
63 static bool is_trace_kprobe(struct dyn_event *ev)
64 {
65 	return ev->ops == &trace_kprobe_ops;
66 }
67 
68 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
69 {
70 	return container_of(ev, struct trace_kprobe, devent);
71 }
72 
73 /**
74  * for_each_trace_kprobe - iterate over the trace_kprobe list
75  * @pos:	the struct trace_kprobe * for each entry
76  * @dpos:	the struct dyn_event * to use as a loop cursor
77  */
78 #define for_each_trace_kprobe(pos, dpos)	\
79 	for_each_dyn_event(dpos)		\
80 		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
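
/*
 * A minimal iteration sketch (not code from this file; find_trace_kprobe()
 * below is a real user). Both cursors are declared by the caller, and the
 * callers in this file hold event_mutex while iterating:
 *
 *	struct dyn_event *dpos;
 *	struct trace_kprobe *pos;
 *
 *	for_each_trace_kprobe(pos, dpos)
 *		pr_info("%s\n", trace_probe_name(&pos->tp));
 */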
81 
82 #define SIZEOF_TRACE_KPROBE(n)				\
83 	(offsetof(struct trace_kprobe, tp.args) +	\
84 	(sizeof(struct probe_arg) * (n)))
85 
86 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
87 {
88 	return tk->rp.handler != NULL;
89 }
90 
91 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
92 {
93 	return tk->symbol ? tk->symbol : "unknown";
94 }
95 
96 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
97 {
98 	return tk->rp.kp.offset;
99 }
100 
101 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
102 {
103 	return !!(kprobe_gone(&tk->rp.kp));
104 }
105 
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
107 						 struct module *mod)
108 {
109 	int len = strlen(module_name(mod));
110 	const char *name = trace_kprobe_symbol(tk);
111 
112 	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
113 }
114 
115 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
116 {
117 	char *p;
118 	bool ret;
119 
120 	if (!tk->symbol)
121 		return false;
122 	p = strchr(tk->symbol, ':');
123 	if (!p)
124 		return true;
125 	*p = '\0';
126 	mutex_lock(&module_mutex);
127 	ret = !!find_module(tk->symbol);
128 	mutex_unlock(&module_mutex);
129 	*p = ':';
130 
131 	return ret;
132 }
133 
134 static bool trace_kprobe_is_busy(struct dyn_event *ev)
135 {
136 	struct trace_kprobe *tk = to_trace_kprobe(ev);
137 
138 	return trace_probe_is_enabled(&tk->tp);
139 }
140 
141 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
142 					    int argc, const char **argv)
143 {
144 	char buf[MAX_ARGSTR_LEN + 1];
145 
146 	if (!argc)
147 		return true;
148 
149 	if (!tk->symbol)
150 		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
151 	else if (tk->rp.kp.offset)
152 		snprintf(buf, sizeof(buf), "%s+%u",
153 			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
154 	else
155 		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
156 	if (strcmp(buf, argv[0]))
157 		return false;
158 	argc--; argv++;
159 
160 	return trace_probe_match_command_args(&tk->tp, argc, argv);
161 }
162 
163 static bool trace_kprobe_match(const char *system, const char *event,
164 			int argc, const char **argv, struct dyn_event *ev)
165 {
166 	struct trace_kprobe *tk = to_trace_kprobe(ev);
167 
168 	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
169 	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
170 	    trace_kprobe_match_command_head(tk, argc, argv);
171 }
172 
173 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
174 {
175 	unsigned long nhit = 0;
176 	int cpu;
177 
178 	for_each_possible_cpu(cpu)
179 		nhit += *per_cpu_ptr(tk->nhit, cpu);
180 
181 	return nhit;
182 }
183 
184 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
185 {
186 	return !(list_empty(&tk->rp.kp.list) &&
187 		 hlist_unhashed(&tk->rp.kp.hlist));
188 }
189 
190 /* Return 0 if it fails to find the symbol address */
191 static nokprobe_inline
192 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
193 {
194 	unsigned long addr;
195 
196 	if (tk->symbol) {
197 		addr = (unsigned long)
198 			kallsyms_lookup_name(trace_kprobe_symbol(tk));
199 		if (addr)
200 			addr += tk->rp.kp.offset;
201 	} else {
202 		addr = (unsigned long)tk->rp.kp.addr;
203 	}
204 	return addr;
205 }
206 
207 static nokprobe_inline struct trace_kprobe *
208 trace_kprobe_primary_from_call(struct trace_event_call *call)
209 {
210 	struct trace_probe *tp;
211 
212 	tp = trace_probe_primary_from_call(call);
213 	if (WARN_ON_ONCE(!tp))
214 		return NULL;
215 
216 	return container_of(tp, struct trace_kprobe, tp);
217 }
218 
219 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
220 {
221 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
222 
223 	return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
224 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
225 			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
226 }
227 
228 bool trace_kprobe_error_injectable(struct trace_event_call *call)
229 {
230 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
231 
232 	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
233 	       false;
234 }
235 
236 static int register_kprobe_event(struct trace_kprobe *tk);
237 static int unregister_kprobe_event(struct trace_kprobe *tk);
238 
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
240 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
241 				struct pt_regs *regs);
242 
243 static void free_trace_kprobe(struct trace_kprobe *tk)
244 {
245 	if (tk) {
246 		trace_probe_cleanup(&tk->tp);
247 		kfree(tk->symbol);
248 		free_percpu(tk->nhit);
249 		kfree(tk);
250 	}
251 }
252 
253 /*
254  * Allocate a new trace_kprobe and initialize it (including kprobes).
255  */
256 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
257 					     const char *event,
258 					     void *addr,
259 					     const char *symbol,
260 					     unsigned long offs,
261 					     int maxactive,
262 					     int nargs, bool is_return)
263 {
264 	struct trace_kprobe *tk;
265 	int ret = -ENOMEM;
266 
267 	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
268 	if (!tk)
269 		return ERR_PTR(ret);
270 
271 	tk->nhit = alloc_percpu(unsigned long);
272 	if (!tk->nhit)
273 		goto error;
274 
275 	if (symbol) {
276 		tk->symbol = kstrdup(symbol, GFP_KERNEL);
277 		if (!tk->symbol)
278 			goto error;
279 		tk->rp.kp.symbol_name = tk->symbol;
280 		tk->rp.kp.offset = offs;
281 	} else
282 		tk->rp.kp.addr = addr;
283 
284 	if (is_return)
285 		tk->rp.handler = kretprobe_dispatcher;
286 	else
287 		tk->rp.kp.pre_handler = kprobe_dispatcher;
288 
289 	tk->rp.maxactive = maxactive;
290 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
291 	INIT_LIST_HEAD(&tk->rp.kp.list);
292 
293 	ret = trace_probe_init(&tk->tp, event, group, false);
294 	if (ret < 0)
295 		goto error;
296 
297 	dyn_event_init(&tk->devent, &trace_kprobe_ops);
298 	return tk;
299 error:
300 	free_trace_kprobe(tk);
301 	return ERR_PTR(ret);
302 }
303 
304 static struct trace_kprobe *find_trace_kprobe(const char *event,
305 					      const char *group)
306 {
307 	struct dyn_event *pos;
308 	struct trace_kprobe *tk;
309 
310 	for_each_trace_kprobe(tk, pos)
311 		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
312 		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
313 			return tk;
314 	return NULL;
315 }
316 
317 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
318 {
319 	int ret = 0;
320 
321 	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
322 		if (trace_kprobe_is_return(tk))
323 			ret = enable_kretprobe(&tk->rp);
324 		else
325 			ret = enable_kprobe(&tk->rp.kp);
326 	}
327 
328 	return ret;
329 }
330 
331 static void __disable_trace_kprobe(struct trace_probe *tp)
332 {
333 	struct trace_probe *pos;
334 	struct trace_kprobe *tk;
335 
336 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
337 		tk = container_of(pos, struct trace_kprobe, tp);
338 		if (!trace_kprobe_is_registered(tk))
339 			continue;
340 		if (trace_kprobe_is_return(tk))
341 			disable_kretprobe(&tk->rp);
342 		else
343 			disable_kprobe(&tk->rp.kp);
344 	}
345 }
346 
347 /*
348  * Enable trace_probe
349  * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
350  */
351 static int enable_trace_kprobe(struct trace_event_call *call,
352 				struct trace_event_file *file)
353 {
354 	struct trace_probe *pos, *tp;
355 	struct trace_kprobe *tk;
356 	bool enabled;
357 	int ret = 0;
358 
359 	tp = trace_probe_primary_from_call(call);
360 	if (WARN_ON_ONCE(!tp))
361 		return -ENODEV;
362 	enabled = trace_probe_is_enabled(tp);
363 
364 	/* This also changes "enabled" state */
365 	if (file) {
366 		ret = trace_probe_add_file(tp, file);
367 		if (ret)
368 			return ret;
369 	} else
370 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
371 
372 	if (enabled)
373 		return 0;
374 
375 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
376 		tk = container_of(pos, struct trace_kprobe, tp);
377 		if (trace_kprobe_has_gone(tk))
378 			continue;
379 		ret = __enable_trace_kprobe(tk);
380 		if (ret)
381 			break;
382 		enabled = true;
383 	}
384 
385 	if (ret) {
386 		/* Failed to enable one of them. Roll back all */
387 		if (enabled)
388 			__disable_trace_kprobe(tp);
389 		if (file)
390 			trace_probe_remove_file(tp, file);
391 		else
392 			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
393 	}
394 
395 	return ret;
396 }
397 
398 /*
399  * Disable trace_probe
400  * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
401  */
402 static int disable_trace_kprobe(struct trace_event_call *call,
403 				struct trace_event_file *file)
404 {
405 	struct trace_probe *tp;
406 
407 	tp = trace_probe_primary_from_call(call);
408 	if (WARN_ON_ONCE(!tp))
409 		return -ENODEV;
410 
411 	if (file) {
412 		if (!trace_probe_get_file_link(tp, file))
413 			return -ENOENT;
414 		if (!trace_probe_has_single_file(tp))
415 			goto out;
416 		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
417 	} else
418 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
419 
420 	if (!trace_probe_is_enabled(tp))
421 		__disable_trace_kprobe(tp);
422 
423  out:
424 	if (file)
425 		/*
426 		 * Synchronization is done in the function below. For a perf event,
427 		 * file == NULL and perf_trace_event_unreg() calls
428 		 * tracepoint_synchronize_unregister() to synchronize the event,
429 		 * so we don't need to care about it here.
430 		 */
431 		trace_probe_remove_file(tp, file);
432 
433 	return 0;
434 }
435 
436 #if defined(CONFIG_KPROBES_ON_FTRACE) && \
437 	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
438 static bool __within_notrace_func(unsigned long addr)
439 {
440 	unsigned long offset, size;
441 
442 	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
443 		return false;
444 
445 	/* Get the entry address of the target function */
446 	addr -= offset;
447 
448 	/*
449 	 * Since ftrace_location_range() does inclusive range check, we need
450 	 * to subtract 1 byte from the end address.
451 	 */
452 	return !ftrace_location_range(addr, addr + size - 1);
453 }
454 
455 static bool within_notrace_func(struct trace_kprobe *tk)
456 {
457 	unsigned long addr = trace_kprobe_address(tk);
458 	char symname[KSYM_NAME_LEN], *p;
459 
460 	if (!__within_notrace_func(addr))
461 		return false;
462 
463 	/* Check if the address is on a suffixed-symbol */
464 	if (!lookup_symbol_name(addr, symname)) {
465 		p = strchr(symname, '.');
466 		if (!p)
467 			return true;
468 		*p = '\0';
469 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
470 		if (addr)
471 			return __within_notrace_func(addr);
472 	}
473 
474 	return true;
475 }
476 #else
477 #define within_notrace_func(tk)	(false)
478 #endif
479 
480 /* Internal register function - just handle k*probes and flags */
481 static int __register_trace_kprobe(struct trace_kprobe *tk)
482 {
483 	int i, ret;
484 
485 	ret = security_locked_down(LOCKDOWN_KPROBES);
486 	if (ret)
487 		return ret;
488 
489 	if (trace_kprobe_is_registered(tk))
490 		return -EINVAL;
491 
492 	if (within_notrace_func(tk)) {
493 		pr_warn("Could not probe notrace function %s\n",
494 			trace_kprobe_symbol(tk));
495 		return -EINVAL;
496 	}
497 
498 	for (i = 0; i < tk->tp.nr_args; i++) {
499 		ret = traceprobe_update_arg(&tk->tp.args[i]);
500 		if (ret)
501 			return ret;
502 	}
503 
504 	/* Set/clear disabled flag according to tp->flag */
505 	if (trace_probe_is_enabled(&tk->tp))
506 		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
507 	else
508 		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
509 
510 	if (trace_kprobe_is_return(tk))
511 		ret = register_kretprobe(&tk->rp);
512 	else
513 		ret = register_kprobe(&tk->rp.kp);
514 
515 	return ret;
516 }
517 
518 /* Internal unregister function - just handle k*probes and flags */
519 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
520 {
521 	if (trace_kprobe_is_registered(tk)) {
522 		if (trace_kprobe_is_return(tk))
523 			unregister_kretprobe(&tk->rp);
524 		else
525 			unregister_kprobe(&tk->rp.kp);
526 		/* Cleanup kprobe for reuse and mark it unregistered */
527 		INIT_HLIST_NODE(&tk->rp.kp.hlist);
528 		INIT_LIST_HEAD(&tk->rp.kp.list);
529 		if (tk->rp.kp.symbol_name)
530 			tk->rp.kp.addr = NULL;
531 	}
532 }
533 
534 /* Unregister a trace_probe and probe_event */
535 static int unregister_trace_kprobe(struct trace_kprobe *tk)
536 {
537 	/* If other probes are on the event, just unregister kprobe */
538 	if (trace_probe_has_sibling(&tk->tp))
539 		goto unreg;
540 
541 	/* Enabled event can not be unregistered */
542 	if (trace_probe_is_enabled(&tk->tp))
543 		return -EBUSY;
544 
545 	/* Will fail if probe is being used by ftrace or perf */
546 	if (unregister_kprobe_event(tk))
547 		return -EBUSY;
548 
549 unreg:
550 	__unregister_trace_kprobe(tk);
551 	dyn_event_remove(&tk->devent);
552 	trace_probe_unlink(&tk->tp);
553 
554 	return 0;
555 }
556 
557 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
558 					 struct trace_kprobe *comp)
559 {
560 	struct trace_probe_event *tpe = orig->tp.event;
561 	struct trace_probe *pos;
562 	int i;
563 
564 	list_for_each_entry(pos, &tpe->probes, list) {
565 		orig = container_of(pos, struct trace_kprobe, tp);
566 		if (strcmp(trace_kprobe_symbol(orig),
567 			   trace_kprobe_symbol(comp)) ||
568 		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
569 			continue;
570 
571 		/*
572 		 * trace_probe_compare_arg_type() ensured that nr_args and
573 		 * each argument name and type are the same. Let's compare comm.
574 		 */
575 		for (i = 0; i < orig->tp.nr_args; i++) {
576 			if (strcmp(orig->tp.args[i].comm,
577 				   comp->tp.args[i].comm))
578 				break;
579 		}
580 
581 		if (i == orig->tp.nr_args)
582 			return true;
583 	}
584 
585 	return false;
586 }
587 
588 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
589 {
590 	int ret;
591 
592 	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
593 	if (ret) {
594 		/* Note that arguments start at index 2 */
595 		trace_probe_log_set_index(ret + 1);
596 		trace_probe_log_err(0, DIFF_ARG_TYPE);
597 		return -EEXIST;
598 	}
599 	if (trace_kprobe_has_same_kprobe(to, tk)) {
600 		trace_probe_log_set_index(0);
601 		trace_probe_log_err(0, SAME_PROBE);
602 		return -EEXIST;
603 	}
604 
605 	/* Append to existing event */
606 	ret = trace_probe_append(&tk->tp, &to->tp);
607 	if (ret)
608 		return ret;
609 
610 	/* Register k*probe */
611 	ret = __register_trace_kprobe(tk);
612 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
613 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
614 		ret = 0;
615 	}
616 
617 	if (ret)
618 		trace_probe_unlink(&tk->tp);
619 	else
620 		dyn_event_add(&tk->devent);
621 
622 	return ret;
623 }
624 
625 /* Register a trace_probe and probe_event */
626 static int register_trace_kprobe(struct trace_kprobe *tk)
627 {
628 	struct trace_kprobe *old_tk;
629 	int ret;
630 
631 	mutex_lock(&event_mutex);
632 
633 	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
634 				   trace_probe_group_name(&tk->tp));
635 	if (old_tk) {
636 		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
637 			trace_probe_log_set_index(0);
638 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
639 			ret = -EEXIST;
640 		} else {
641 			ret = append_trace_kprobe(tk, old_tk);
642 		}
643 		goto end;
644 	}
645 
646 	/* Register new event */
647 	ret = register_kprobe_event(tk);
648 	if (ret) {
649 		pr_warn("Failed to register probe event(%d)\n", ret);
650 		goto end;
651 	}
652 
653 	/* Register k*probe */
654 	ret = __register_trace_kprobe(tk);
655 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
656 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
657 		ret = 0;
658 	}
659 
660 	if (ret < 0)
661 		unregister_kprobe_event(tk);
662 	else
663 		dyn_event_add(&tk->devent);
664 
665 end:
666 	mutex_unlock(&event_mutex);
667 	return ret;
668 }
669 
670 /* Module notifier callback, checking events on the coming module */
671 static int trace_kprobe_module_callback(struct notifier_block *nb,
672 				       unsigned long val, void *data)
673 {
674 	struct module *mod = data;
675 	struct dyn_event *pos;
676 	struct trace_kprobe *tk;
677 	int ret;
678 
679 	if (val != MODULE_STATE_COMING)
680 		return NOTIFY_DONE;
681 
682 	/* Update probes on coming module */
683 	mutex_lock(&event_mutex);
684 	for_each_trace_kprobe(tk, pos) {
685 		if (trace_kprobe_within_module(tk, mod)) {
686 			/* Don't need to check busy - this should have gone. */
687 			__unregister_trace_kprobe(tk);
688 			ret = __register_trace_kprobe(tk);
689 			if (ret)
690 				pr_warn("Failed to re-register probe %s on %s: %d\n",
691 					trace_probe_name(&tk->tp),
692 					module_name(mod), ret);
693 		}
694 	}
695 	mutex_unlock(&event_mutex);
696 
697 	return NOTIFY_DONE;
698 }
699 
700 static struct notifier_block trace_kprobe_module_nb = {
701 	.notifier_call = trace_kprobe_module_callback,
702 	.priority = 1	/* Invoked after kprobe module callback */
703 };
704 
705 /* Convert certain characters (':' and '.') into '_' when generating event names */
706 static inline void sanitize_event_name(char *name)
707 {
708 	while (*name++ != '\0')
709 		if (*name == ':' || *name == '.')
710 			*name = '_';
711 }
712 
713 static int trace_kprobe_create(int argc, const char *argv[])
714 {
715 	/*
716 	 * Argument syntax:
717 	 *  - Add kprobe:
718 	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
719 	 *  - Add kretprobe:
720 	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
721 	 * Fetch args:
722 	 *  $retval	: fetch return value
723 	 *  $stack	: fetch stack address
724 	 *  $stackN	: fetch Nth of stack (N:0-)
725 	 *  $comm       : fetch current task comm
726 	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
727 	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
728 	 *  %REG	: fetch register REG
729 	 * Dereferencing memory fetch:
730 	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
731 	 * Alias name of args:
732 	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
733 	 * Type of args:
734 	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
735 	 */
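	/*
	 * For example (an illustrative sketch; the probed symbol and fetch
	 * args are placeholders, not mandated by this code):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=$stack0 fl=+0($stack)' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 *   echo 'r:myretprobe do_sys_open ret=$retval' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 *   echo '-:myprobe' >> /sys/kernel/tracing/kprobe_events
	 */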
736 	struct trace_kprobe *tk = NULL;
737 	int i, len, ret = 0;
738 	bool is_return = false;
739 	char *symbol = NULL, *tmp = NULL;
740 	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
741 	int maxactive = 0;
742 	long offset = 0;
743 	void *addr = NULL;
744 	char buf[MAX_EVENT_NAME_LEN];
745 	unsigned int flags = TPARG_FL_KERNEL;
746 
747 	switch (argv[0][0]) {
748 	case 'r':
749 		is_return = true;
750 		flags |= TPARG_FL_RETURN;
751 		break;
752 	case 'p':
753 		break;
754 	default:
755 		return -ECANCELED;
756 	}
757 	if (argc < 2)
758 		return -ECANCELED;
759 
760 	trace_probe_log_init("trace_kprobe", argc, argv);
761 
762 	event = strchr(&argv[0][1], ':');
763 	if (event)
764 		event++;
765 
766 	if (isdigit(argv[0][1])) {
767 		if (!is_return) {
768 			trace_probe_log_err(1, MAXACT_NO_KPROBE);
769 			goto parse_error;
770 		}
771 		if (event)
772 			len = event - &argv[0][1] - 1;
773 		else
774 			len = strlen(&argv[0][1]);
775 		if (len > MAX_EVENT_NAME_LEN - 1) {
776 			trace_probe_log_err(1, BAD_MAXACT);
777 			goto parse_error;
778 		}
779 		memcpy(buf, &argv[0][1], len);
780 		buf[len] = '\0';
781 		ret = kstrtouint(buf, 0, &maxactive);
782 		if (ret || !maxactive) {
783 			trace_probe_log_err(1, BAD_MAXACT);
784 			goto parse_error;
785 		}
786 		/* kretprobe instances are iterated over via a list. The
787 		 * maximum should stay reasonable.
788 		 */
789 		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
790 			trace_probe_log_err(1, MAXACT_TOO_BIG);
791 			goto parse_error;
792 		}
793 	}
794 
795 	/* Try to parse an address. If that fails, try to read the
796 	 * input as a symbol. */
797 	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
798 		trace_probe_log_set_index(1);
799 		/* Check whether uprobe event specified */
800 		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
801 			ret = -ECANCELED;
802 			goto error;
803 		}
804 		/* a symbol specified */
805 		symbol = kstrdup(argv[1], GFP_KERNEL);
806 		if (!symbol)
807 			return -ENOMEM;
808 		/* TODO: support .init module functions */
809 		ret = traceprobe_split_symbol_offset(symbol, &offset);
810 		if (ret || offset < 0 || offset > UINT_MAX) {
811 			trace_probe_log_err(0, BAD_PROBE_ADDR);
812 			goto parse_error;
813 		}
814 		if (kprobe_on_func_entry(NULL, symbol, offset))
815 			flags |= TPARG_FL_FENTRY;
816 		if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
817 			trace_probe_log_err(0, BAD_RETPROBE);
818 			goto parse_error;
819 		}
820 	}
821 
822 	trace_probe_log_set_index(0);
823 	if (event) {
824 		ret = traceprobe_parse_event_name(&event, &group, buf,
825 						  event - argv[0]);
826 		if (ret)
827 			goto parse_error;
828 	} else {
829 		/* Make a new event name */
830 		if (symbol)
831 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
832 				 is_return ? 'r' : 'p', symbol, offset);
833 		else
834 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
835 				 is_return ? 'r' : 'p', addr);
836 		sanitize_event_name(buf);
837 		event = buf;
838 	}
839 
840 	/* setup a probe */
841 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
842 			       argc - 2, is_return);
843 	if (IS_ERR(tk)) {
844 		ret = PTR_ERR(tk);
845 		/* This must return -ENOMEM, else there is a bug */
846 		WARN_ON_ONCE(ret != -ENOMEM);
847 		goto out;	/* We know tk is not allocated */
848 	}
849 	argc -= 2; argv += 2;
850 
851 	/* parse arguments */
852 	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
853 		tmp = kstrdup(argv[i], GFP_KERNEL);
854 		if (!tmp) {
855 			ret = -ENOMEM;
856 			goto error;
857 		}
858 
859 		trace_probe_log_set_index(i + 2);
860 		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
861 		kfree(tmp);
862 		if (ret)
863 			goto error;	/* This can be -ENOMEM */
864 	}
865 
866 	ret = traceprobe_set_print_fmt(&tk->tp, is_return);
867 	if (ret < 0)
868 		goto error;
869 
870 	ret = register_trace_kprobe(tk);
871 	if (ret) {
872 		trace_probe_log_set_index(1);
873 		if (ret == -EILSEQ)
874 			trace_probe_log_err(0, BAD_INSN_BNDRY);
875 		else if (ret == -ENOENT)
876 			trace_probe_log_err(0, BAD_PROBE_ADDR);
877 		else if (ret != -ENOMEM && ret != -EEXIST)
878 			trace_probe_log_err(0, FAIL_REG_PROBE);
879 		goto error;
880 	}
881 
882 out:
883 	trace_probe_log_clear();
884 	kfree(symbol);
885 	return ret;
886 
887 parse_error:
888 	ret = -EINVAL;
889 error:
890 	free_trace_kprobe(tk);
891 	goto out;
892 }
893 
894 static int create_or_delete_trace_kprobe(int argc, char **argv)
895 {
896 	int ret;
897 
898 	if (argv[0][0] == '-')
899 		return dyn_event_release(argc, argv, &trace_kprobe_ops);
900 
901 	ret = trace_kprobe_create(argc, (const char **)argv);
902 	return ret == -ECANCELED ? -EINVAL : ret;
903 }
904 
905 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
906 {
907 	return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
908 }
909 
910 /**
911  * kprobe_event_cmd_init - Initialize a kprobe event command object
912  * @cmd: A pointer to the dynevent_cmd struct representing the new event
913  * @buf: A pointer to the buffer used to build the command
914  * @maxlen: The length of the buffer passed in @buf
915  *
916  * Initialize a kprobe event command object.  Use this before
917  * calling any of the other kprobe_event functions.
918  */
919 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
920 {
921 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
922 			  trace_kprobe_run_command);
923 }
924 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
925 
926 /**
927  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
928  * @cmd: A pointer to the dynevent_cmd struct representing the new event
929  * @name: The name of the kprobe event
930  * @loc: The location of the kprobe event
931  * @kretprobe: Is this a return probe?
932  * @args: Variable number of arg (pairs), one pair for each field
933  *
934  * NOTE: Users normally won't want to call this function directly, but
935  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
936  * adds a NULL to the end of the arg list.  If this function is used
937  * directly, make sure the last arg in the variable arg list is NULL.
938  *
939  * Generate a kprobe event command to be executed by
940  * kprobe_event_gen_cmd_end().  This function can be used to generate the
941  * complete command or only the first part of it; in the latter case,
942  * kprobe_event_add_fields() can be used to add more fields following this.
943  *
944  * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
945  * returns -EINVAL if @loc == NULL.
946  *
947  * Return: 0 if successful, error otherwise.
948  */
949 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
950 				 const char *name, const char *loc, ...)
951 {
952 	char buf[MAX_EVENT_NAME_LEN];
953 	struct dynevent_arg arg;
954 	va_list args;
955 	int ret;
956 
957 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
958 		return -EINVAL;
959 
960 	if (!loc)
961 		return -EINVAL;
962 
963 	if (kretprobe)
964 		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
965 	else
966 		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
967 
968 	ret = dynevent_str_add(cmd, buf);
969 	if (ret)
970 		return ret;
971 
972 	dynevent_arg_init(&arg, 0);
973 	arg.str = loc;
974 	ret = dynevent_arg_add(cmd, &arg, NULL);
975 	if (ret)
976 		return ret;
977 
978 	va_start(args, loc);
979 	for (;;) {
980 		const char *field;
981 
982 		field = va_arg(args, const char *);
983 		if (!field)
984 			break;
985 
986 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
987 			ret = -EINVAL;
988 			break;
989 		}
990 
991 		arg.str = field;
992 		ret = dynevent_arg_add(cmd, &arg, NULL);
993 		if (ret)
994 			break;
995 	}
996 	va_end(args);
997 
998 	return ret;
999 }
1000 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
1001 
1002 /**
1003  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1004  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1005  * @args: Variable number of arg (pairs), one pair for each field
1006  *
1007  * NOTE: Users normally won't want to call this function directly, but
1008  * rather use the kprobe_event_add_fields() wrapper, which
1009  * automatically adds a NULL to the end of the arg list.  If this
1010  * function is used directly, make sure the last arg in the variable
1011  * arg list is NULL.
1012  *
1013  * Add probe fields to an existing kprobe command using a variable
1014  * list of args.  Fields are added in the same order they're listed.
1015  *
1016  * Return: 0 if successful, error otherwise.
1017  */
1018 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1019 {
1020 	struct dynevent_arg arg;
1021 	va_list args;
1022 	int ret = 0;
1023 
1024 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1025 		return -EINVAL;
1026 
1027 	dynevent_arg_init(&arg, 0);
1028 
1029 	va_start(args, cmd);
1030 	for (;;) {
1031 		const char *field;
1032 
1033 		field = va_arg(args, const char *);
1034 		if (!field)
1035 			break;
1036 
1037 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1038 			ret = -EINVAL;
1039 			break;
1040 		}
1041 
1042 		arg.str = field;
1043 		ret = dynevent_arg_add(cmd, &arg, NULL);
1044 		if (ret)
1045 			break;
1046 	}
1047 	va_end(args);
1048 
1049 	return ret;
1050 }
1051 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
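
/*
 * A rough in-kernel usage sketch for the command API above (event name,
 * probe location and fetch args are illustrative; kprobe_event_gen_cmd_start(),
 * kprobe_event_add_fields() and kprobe_event_gen_cmd_end() are the wrappers
 * declared in <linux/trace_events.h>):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 *
 *	(Later, kprobe_event_delete("myprobe") tears the event down again.)
 */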
1052 
1053 /**
1054  * kprobe_event_delete - Delete a kprobe event
1055  * @name: The name of the kprobe event to delete
1056  *
1057  * Delete a kprobe event with the given @name from kernel code rather
1058  * than directly from the command line.
1059  *
1060  * Return: 0 if successful, error otherwise.
1061  */
1062 int kprobe_event_delete(const char *name)
1063 {
1064 	char buf[MAX_EVENT_NAME_LEN];
1065 
1066 	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1067 
1068 	return trace_run_command(buf, create_or_delete_trace_kprobe);
1069 }
1070 EXPORT_SYMBOL_GPL(kprobe_event_delete);
1071 
1072 static int trace_kprobe_release(struct dyn_event *ev)
1073 {
1074 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1075 	int ret = unregister_trace_kprobe(tk);
1076 
1077 	if (!ret)
1078 		free_trace_kprobe(tk);
1079 	return ret;
1080 }
1081 
1082 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1083 {
1084 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1085 	int i;
1086 
1087 	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1088 	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1089 		seq_printf(m, "%d", tk->rp.maxactive);
1090 	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1091 				trace_probe_name(&tk->tp));
1092 
1093 	if (!tk->symbol)
1094 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
1095 	else if (tk->rp.kp.offset)
1096 		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1097 			   tk->rp.kp.offset);
1098 	else
1099 		seq_printf(m, " %s", trace_kprobe_symbol(tk));
1100 
1101 	for (i = 0; i < tk->tp.nr_args; i++)
1102 		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1103 	seq_putc(m, '\n');
1104 
1105 	return 0;
1106 }
1107 
1108 static int probes_seq_show(struct seq_file *m, void *v)
1109 {
1110 	struct dyn_event *ev = v;
1111 
1112 	if (!is_trace_kprobe(ev))
1113 		return 0;
1114 
1115 	return trace_kprobe_show(m, ev);
1116 }
1117 
1118 static const struct seq_operations probes_seq_op = {
1119 	.start  = dyn_event_seq_start,
1120 	.next   = dyn_event_seq_next,
1121 	.stop   = dyn_event_seq_stop,
1122 	.show   = probes_seq_show
1123 };
1124 
1125 static int probes_open(struct inode *inode, struct file *file)
1126 {
1127 	int ret;
1128 
1129 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1130 	if (ret)
1131 		return ret;
1132 
1133 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1134 		ret = dyn_events_release_all(&trace_kprobe_ops);
1135 		if (ret < 0)
1136 			return ret;
1137 	}
1138 
1139 	return seq_open(file, &probes_seq_op);
1140 }
1141 
1142 static ssize_t probes_write(struct file *file, const char __user *buffer,
1143 			    size_t count, loff_t *ppos)
1144 {
1145 	return trace_parse_run_command(file, buffer, count, ppos,
1146 				       create_or_delete_trace_kprobe);
1147 }
1148 
1149 static const struct file_operations kprobe_events_ops = {
1150 	.owner          = THIS_MODULE,
1151 	.open           = probes_open,
1152 	.read           = seq_read,
1153 	.llseek         = seq_lseek,
1154 	.release        = seq_release,
1155 	.write		= probes_write,
1156 };
1157 
1158 /* Probes profiling interfaces */
1159 static int probes_profile_seq_show(struct seq_file *m, void *v)
1160 {
1161 	struct dyn_event *ev = v;
1162 	struct trace_kprobe *tk;
1163 
1164 	if (!is_trace_kprobe(ev))
1165 		return 0;
1166 
1167 	tk = to_trace_kprobe(ev);
1168 	seq_printf(m, "  %-44s %15lu %15lu\n",
1169 		   trace_probe_name(&tk->tp),
1170 		   trace_kprobe_nhit(tk),
1171 		   tk->rp.kp.nmissed);
1172 
1173 	return 0;
1174 }
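
/*
 * Each kprobe_profile line produced above reads, roughly,
 * "<event name>  <hit count>  <missed count>", e.g. (illustrative numbers):
 *
 *   myprobe                                           12               0
 */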
1175 
1176 static const struct seq_operations profile_seq_op = {
1177 	.start  = dyn_event_seq_start,
1178 	.next   = dyn_event_seq_next,
1179 	.stop   = dyn_event_seq_stop,
1180 	.show   = probes_profile_seq_show
1181 };
1182 
1183 static int profile_open(struct inode *inode, struct file *file)
1184 {
1185 	int ret;
1186 
1187 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1188 	if (ret)
1189 		return ret;
1190 
1191 	return seq_open(file, &profile_seq_op);
1192 }
1193 
1194 static const struct file_operations kprobe_profile_ops = {
1195 	.owner          = THIS_MODULE,
1196 	.open           = profile_open,
1197 	.read           = seq_read,
1198 	.llseek         = seq_lseek,
1199 	.release        = seq_release,
1200 };
1201 
1202 /* Kprobe specific fetch functions */
1203 
1204 /* Return the length of the string -- including the terminating NUL byte */
1205 static nokprobe_inline int
1206 fetch_store_strlen_user(unsigned long addr)
1207 {
1208 	const void __user *uaddr =  (__force const void __user *)addr;
1209 
1210 	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1211 }
1212 
1213 /* Return the length of the string -- including the terminating NUL byte */
1214 static nokprobe_inline int
1215 fetch_store_strlen(unsigned long addr)
1216 {
1217 	int ret, len = 0;
1218 	u8 c;
1219 
1220 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1221 	if (addr < TASK_SIZE)
1222 		return fetch_store_strlen_user(addr);
1223 #endif
1224 
1225 	do {
1226 		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1227 		len++;
1228 	} while (c && ret == 0 && len < MAX_STRING_SIZE);
1229 
1230 	return (ret < 0) ? ret : len;
1231 }
1232 
1233 /*
1234  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1235  * with max length and relative data location.
1236  */
1237 static nokprobe_inline int
1238 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1239 {
1240 	const void __user *uaddr =  (__force const void __user *)addr;
1241 	int maxlen = get_loc_len(*(u32 *)dest);
1242 	void *__dest;
1243 	long ret;
1244 
1245 	if (unlikely(!maxlen))
1246 		return -ENOMEM;
1247 
1248 	__dest = get_loc_data(dest, base);
1249 
1250 	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1251 	if (ret >= 0)
1252 		*(u32 *)dest = make_data_loc(ret, __dest - base);
1253 
1254 	return ret;
1255 }
1256 
1257 /*
1258  * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1259  * length and relative data location.
1260  */
1261 static nokprobe_inline int
1262 fetch_store_string(unsigned long addr, void *dest, void *base)
1263 {
1264 	int maxlen = get_loc_len(*(u32 *)dest);
1265 	void *__dest;
1266 	long ret;
1267 
1268 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1269 	if ((unsigned long)addr < TASK_SIZE)
1270 		return fetch_store_string_user(addr, dest, base);
1271 #endif
1272 
1273 	if (unlikely(!maxlen))
1274 		return -ENOMEM;
1275 
1276 	__dest = get_loc_data(dest, base);
1277 
1278 	/*
1279 	 * Try to get string again, since the string can be changed while
1280 	 * probing.
1281 	 */
1282 	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1283 	if (ret >= 0)
1284 		*(u32 *)dest = make_data_loc(ret, __dest - base);
1285 
1286 	return ret;
1287 }
1288 
1289 static nokprobe_inline int
1290 probe_mem_read_user(void *dest, void *src, size_t size)
1291 {
1292 	const void __user *uaddr =  (__force const void __user *)src;
1293 
1294 	return copy_from_user_nofault(dest, uaddr, size);
1295 }
1296 
1297 static nokprobe_inline int
1298 probe_mem_read(void *dest, void *src, size_t size)
1299 {
1300 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1301 	if ((unsigned long)src < TASK_SIZE)
1302 		return probe_mem_read_user(dest, src, size);
1303 #endif
1304 	return copy_from_kernel_nofault(dest, src, size);
1305 }
1306 
1307 /* Note that we don't verify it, since the code does not come from user space */
1308 static int
1309 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1310 		   void *base)
1311 {
1312 	unsigned long val;
1313 
1314 retry:
1315 	/* 1st stage: get value from context */
1316 	switch (code->op) {
1317 	case FETCH_OP_REG:
1318 		val = regs_get_register(regs, code->param);
1319 		break;
1320 	case FETCH_OP_STACK:
1321 		val = regs_get_kernel_stack_nth(regs, code->param);
1322 		break;
1323 	case FETCH_OP_STACKP:
1324 		val = kernel_stack_pointer(regs);
1325 		break;
1326 	case FETCH_OP_RETVAL:
1327 		val = regs_return_value(regs);
1328 		break;
1329 	case FETCH_OP_IMM:
1330 		val = code->immediate;
1331 		break;
1332 	case FETCH_OP_COMM:
1333 		val = (unsigned long)current->comm;
1334 		break;
1335 	case FETCH_OP_DATA:
1336 		val = (unsigned long)code->data;
1337 		break;
1338 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1339 	case FETCH_OP_ARG:
1340 		val = regs_get_kernel_argument(regs, code->param);
1341 		break;
1342 #endif
1343 	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
1344 		code++;
1345 		goto retry;
1346 	default:
1347 		return -EILSEQ;
1348 	}
1349 	code++;
1350 
1351 	return process_fetch_insn_bottom(code, val, dest, base);
1352 }
1353 NOKPROBE_SYMBOL(process_fetch_insn)
1354 
1355 /* Kprobe handler */
1356 static nokprobe_inline void
1357 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1358 		    struct trace_event_file *trace_file)
1359 {
1360 	struct kprobe_trace_entry_head *entry;
1361 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1362 	struct trace_event_buffer fbuffer;
1363 	int dsize;
1364 
1365 	WARN_ON(call != trace_file->event_call);
1366 
1367 	if (trace_trigger_soft_disabled(trace_file))
1368 		return;
1369 
1370 	local_save_flags(fbuffer.flags);
1371 	fbuffer.pc = preempt_count();
1372 	fbuffer.trace_file = trace_file;
1373 
1374 	dsize = __get_data_size(&tk->tp, regs);
1375 
1376 	fbuffer.event =
1377 		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1378 					call->event.type,
1379 					sizeof(*entry) + tk->tp.size + dsize,
1380 					fbuffer.flags, fbuffer.pc);
1381 	if (!fbuffer.event)
1382 		return;
1383 
1384 	fbuffer.regs = regs;
1385 	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1386 	entry->ip = (unsigned long)tk->rp.kp.addr;
1387 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1388 
1389 	trace_event_buffer_commit(&fbuffer);
1390 }
1391 
1392 static void
1393 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1394 {
1395 	struct event_file_link *link;
1396 
1397 	trace_probe_for_each_link_rcu(link, &tk->tp)
1398 		__kprobe_trace_func(tk, regs, link->file);
1399 }
1400 NOKPROBE_SYMBOL(kprobe_trace_func);
1401 
1402 /* Kretprobe handler */
1403 static nokprobe_inline void
1404 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1405 		       struct pt_regs *regs,
1406 		       struct trace_event_file *trace_file)
1407 {
1408 	struct kretprobe_trace_entry_head *entry;
1409 	struct trace_event_buffer fbuffer;
1410 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1411 	int dsize;
1412 
1413 	WARN_ON(call != trace_file->event_call);
1414 
1415 	if (trace_trigger_soft_disabled(trace_file))
1416 		return;
1417 
1418 	local_save_flags(fbuffer.flags);
1419 	fbuffer.pc = preempt_count();
1420 	fbuffer.trace_file = trace_file;
1421 
1422 	dsize = __get_data_size(&tk->tp, regs);
1423 	fbuffer.event =
1424 		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1425 					call->event.type,
1426 					sizeof(*entry) + tk->tp.size + dsize,
1427 					fbuffer.flags, fbuffer.pc);
1428 	if (!fbuffer.event)
1429 		return;
1430 
1431 	fbuffer.regs = regs;
1432 	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1433 	entry->func = (unsigned long)tk->rp.kp.addr;
1434 	entry->ret_ip = (unsigned long)ri->ret_addr;
1435 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1436 
1437 	trace_event_buffer_commit(&fbuffer);
1438 }
1439 
1440 static void
1441 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1442 		     struct pt_regs *regs)
1443 {
1444 	struct event_file_link *link;
1445 
1446 	trace_probe_for_each_link_rcu(link, &tk->tp)
1447 		__kretprobe_trace_func(tk, ri, regs, link->file);
1448 }
1449 NOKPROBE_SYMBOL(kretprobe_trace_func);
1450 
1451 /* Event entry printers */
1452 static enum print_line_t
1453 print_kprobe_event(struct trace_iterator *iter, int flags,
1454 		   struct trace_event *event)
1455 {
1456 	struct kprobe_trace_entry_head *field;
1457 	struct trace_seq *s = &iter->seq;
1458 	struct trace_probe *tp;
1459 
1460 	field = (struct kprobe_trace_entry_head *)iter->ent;
1461 	tp = trace_probe_primary_from_call(
1462 		container_of(event, struct trace_event_call, event));
1463 	if (WARN_ON_ONCE(!tp))
1464 		goto out;
1465 
1466 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1467 
1468 	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1469 		goto out;
1470 
1471 	trace_seq_putc(s, ')');
1472 
1473 	if (print_probe_args(s, tp->args, tp->nr_args,
1474 			     (u8 *)&field[1], field) < 0)
1475 		goto out;
1476 
1477 	trace_seq_putc(s, '\n');
1478  out:
1479 	return trace_handle_return(s);
1480 }
1481 
1482 static enum print_line_t
1483 print_kretprobe_event(struct trace_iterator *iter, int flags,
1484 		      struct trace_event *event)
1485 {
1486 	struct kretprobe_trace_entry_head *field;
1487 	struct trace_seq *s = &iter->seq;
1488 	struct trace_probe *tp;
1489 
1490 	field = (struct kretprobe_trace_entry_head *)iter->ent;
1491 	tp = trace_probe_primary_from_call(
1492 		container_of(event, struct trace_event_call, event));
1493 	if (WARN_ON_ONCE(!tp))
1494 		goto out;
1495 
1496 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1497 
1498 	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1499 		goto out;
1500 
1501 	trace_seq_puts(s, " <- ");
1502 
1503 	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1504 		goto out;
1505 
1506 	trace_seq_putc(s, ')');
1507 
1508 	if (print_probe_args(s, tp->args, tp->nr_args,
1509 			     (u8 *)&field[1], field) < 0)
1510 		goto out;
1511 
1512 	trace_seq_putc(s, '\n');
1513 
1514  out:
1515 	return trace_handle_return(s);
1516 }
1517 
1518 
1519 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1520 {
1521 	int ret;
1522 	struct kprobe_trace_entry_head field;
1523 	struct trace_probe *tp;
1524 
1525 	tp = trace_probe_primary_from_call(event_call);
1526 	if (WARN_ON_ONCE(!tp))
1527 		return -ENOENT;
1528 
1529 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1530 
1531 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1532 }
1533 
1534 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1535 {
1536 	int ret;
1537 	struct kretprobe_trace_entry_head field;
1538 	struct trace_probe *tp;
1539 
1540 	tp = trace_probe_primary_from_call(event_call);
1541 	if (WARN_ON_ONCE(!tp))
1542 		return -ENOENT;
1543 
1544 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1545 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1546 
1547 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1548 }
1549 
1550 #ifdef CONFIG_PERF_EVENTS
1551 
1552 /* Kprobe profile handler */
1553 static int
1554 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1555 {
1556 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1557 	struct kprobe_trace_entry_head *entry;
1558 	struct hlist_head *head;
1559 	int size, __size, dsize;
1560 	int rctx;
1561 
1562 	if (bpf_prog_array_valid(call)) {
1563 		unsigned long orig_ip = instruction_pointer(regs);
1564 		int ret;
1565 
1566 		ret = trace_call_bpf(call, regs);
1567 
1568 		/*
1569 		 * We need to check and see if we modified the pc of the
1570 		 * pt_regs, and if so return 1 so that we don't do the
1571 		 * single stepping.
1572 		 */
1573 		if (orig_ip != instruction_pointer(regs))
1574 			return 1;
1575 		if (!ret)
1576 			return 0;
1577 	}
1578 
1579 	head = this_cpu_ptr(call->perf_events);
1580 	if (hlist_empty(head))
1581 		return 0;
1582 
1583 	dsize = __get_data_size(&tk->tp, regs);
1584 	__size = sizeof(*entry) + tk->tp.size + dsize;
1585 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1586 	size -= sizeof(u32);
1587 
1588 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1589 	if (!entry)
1590 		return 0;
1591 
1592 	entry->ip = (unsigned long)tk->rp.kp.addr;
1593 	memset(&entry[1], 0, dsize);
1594 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1595 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1596 			      head, NULL);
1597 	return 0;
1598 }
1599 NOKPROBE_SYMBOL(kprobe_perf_func);
1600 
1601 /* Kretprobe profile handler */
1602 static void
1603 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1604 		    struct pt_regs *regs)
1605 {
1606 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1607 	struct kretprobe_trace_entry_head *entry;
1608 	struct hlist_head *head;
1609 	int size, __size, dsize;
1610 	int rctx;
1611 
1612 	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1613 		return;
1614 
1615 	head = this_cpu_ptr(call->perf_events);
1616 	if (hlist_empty(head))
1617 		return;
1618 
1619 	dsize = __get_data_size(&tk->tp, regs);
1620 	__size = sizeof(*entry) + tk->tp.size + dsize;
1621 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1622 	size -= sizeof(u32);
1623 
1624 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1625 	if (!entry)
1626 		return;
1627 
1628 	entry->func = (unsigned long)tk->rp.kp.addr;
1629 	entry->ret_ip = (unsigned long)ri->ret_addr;
1630 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1631 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1632 			      head, NULL);
1633 }
1634 NOKPROBE_SYMBOL(kretprobe_perf_func);
1635 
1636 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1637 			const char **symbol, u64 *probe_offset,
1638 			u64 *probe_addr, bool perf_type_tracepoint)
1639 {
1640 	const char *pevent = trace_event_name(event->tp_event);
1641 	const char *group = event->tp_event->class->system;
1642 	struct trace_kprobe *tk;
1643 
1644 	if (perf_type_tracepoint)
1645 		tk = find_trace_kprobe(pevent, group);
1646 	else
1647 		tk = trace_kprobe_primary_from_call(event->tp_event);
1648 	if (!tk)
1649 		return -EINVAL;
1650 
1651 	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1652 					      : BPF_FD_TYPE_KPROBE;
1653 	if (tk->symbol) {
1654 		*symbol = tk->symbol;
1655 		*probe_offset = tk->rp.kp.offset;
1656 		*probe_addr = 0;
1657 	} else {
1658 		*symbol = NULL;
1659 		*probe_offset = 0;
1660 		*probe_addr = (unsigned long)tk->rp.kp.addr;
1661 	}
1662 	return 0;
1663 }
1664 #endif	/* CONFIG_PERF_EVENTS */
1665 
1666 /*
1667  * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1668  *
1669  * kprobe_trace_self_tests_init() does enable_trace_kprobe()/disable_trace_kprobe()
1670  * lockless, but we can't race with this __init function.
1671  */
1672 static int kprobe_register(struct trace_event_call *event,
1673 			   enum trace_reg type, void *data)
1674 {
1675 	struct trace_event_file *file = data;
1676 
1677 	switch (type) {
1678 	case TRACE_REG_REGISTER:
1679 		return enable_trace_kprobe(event, file);
1680 	case TRACE_REG_UNREGISTER:
1681 		return disable_trace_kprobe(event, file);
1682 
1683 #ifdef CONFIG_PERF_EVENTS
1684 	case TRACE_REG_PERF_REGISTER:
1685 		return enable_trace_kprobe(event, NULL);
1686 	case TRACE_REG_PERF_UNREGISTER:
1687 		return disable_trace_kprobe(event, NULL);
1688 	case TRACE_REG_PERF_OPEN:
1689 	case TRACE_REG_PERF_CLOSE:
1690 	case TRACE_REG_PERF_ADD:
1691 	case TRACE_REG_PERF_DEL:
1692 		return 0;
1693 #endif
1694 	}
1695 	return 0;
1696 }
1697 
1698 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1699 {
1700 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1701 	int ret = 0;
1702 
1703 	raw_cpu_inc(*tk->nhit);
1704 
1705 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1706 		kprobe_trace_func(tk, regs);
1707 #ifdef CONFIG_PERF_EVENTS
1708 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1709 		ret = kprobe_perf_func(tk, regs);
1710 #endif
1711 	return ret;
1712 }
1713 NOKPROBE_SYMBOL(kprobe_dispatcher);
1714 
1715 static int
1716 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1717 {
1718 	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1719 
1720 	raw_cpu_inc(*tk->nhit);
1721 
1722 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1723 		kretprobe_trace_func(tk, ri, regs);
1724 #ifdef CONFIG_PERF_EVENTS
1725 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1726 		kretprobe_perf_func(tk, ri, regs);
1727 #endif
1728 	return 0;	/* We don't tweak the kernel, so just return 0 */
1729 }
1730 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1731 
1732 static struct trace_event_functions kretprobe_funcs = {
1733 	.trace		= print_kretprobe_event
1734 };
1735 
1736 static struct trace_event_functions kprobe_funcs = {
1737 	.trace		= print_kprobe_event
1738 };
1739 
1740 static struct trace_event_fields kretprobe_fields_array[] = {
1741 	{ .type = TRACE_FUNCTION_TYPE,
1742 	  .define_fields = kretprobe_event_define_fields },
1743 	{}
1744 };
1745 
1746 static struct trace_event_fields kprobe_fields_array[] = {
1747 	{ .type = TRACE_FUNCTION_TYPE,
1748 	  .define_fields = kprobe_event_define_fields },
1749 	{}
1750 };
1751 
1752 static inline void init_trace_event_call(struct trace_kprobe *tk)
1753 {
1754 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1755 
1756 	if (trace_kprobe_is_return(tk)) {
1757 		call->event.funcs = &kretprobe_funcs;
1758 		call->class->fields_array = kretprobe_fields_array;
1759 	} else {
1760 		call->event.funcs = &kprobe_funcs;
1761 		call->class->fields_array = kprobe_fields_array;
1762 	}
1763 
1764 	call->flags = TRACE_EVENT_FL_KPROBE;
1765 	call->class->reg = kprobe_register;
1766 }
1767 
1768 static int register_kprobe_event(struct trace_kprobe *tk)
1769 {
1770 	init_trace_event_call(tk);
1771 
1772 	return trace_probe_register_event_call(&tk->tp);
1773 }
1774 
1775 static int unregister_kprobe_event(struct trace_kprobe *tk)
1776 {
1777 	return trace_probe_unregister_event_call(&tk->tp);
1778 }
1779 
1780 #ifdef CONFIG_PERF_EVENTS
1781 /* create a trace_kprobe, but don't add it to global lists */
1782 struct trace_event_call *
1783 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1784 			  bool is_return)
1785 {
1786 	struct trace_kprobe *tk;
1787 	int ret;
1788 	char *event;
1789 
1790 	/*
1791 	 * Local trace_kprobes are not added to dyn_event, so they are never
1792 	 * found by find_trace_kprobe(). Therefore, there is no concern about
1793 	 * duplicate names here.
1794 	 */
1795 	event = func ? func : "DUMMY_EVENT";
1796 
1797 	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1798 				offs, 0 /* maxactive */, 0 /* nargs */,
1799 				is_return);
1800 
1801 	if (IS_ERR(tk)) {
1802 		pr_info("Failed to allocate trace_probe.(%d)\n",
1803 			(int)PTR_ERR(tk));
1804 		return ERR_CAST(tk);
1805 	}
1806 
1807 	init_trace_event_call(tk);
1808 
1809 	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1810 		ret = -ENOMEM;
1811 		goto error;
1812 	}
1813 
1814 	ret = __register_trace_kprobe(tk);
1815 	if (ret < 0)
1816 		goto error;
1817 
1818 	return trace_probe_event_call(&tk->tp);
1819 error:
1820 	free_trace_kprobe(tk);
1821 	return ERR_PTR(ret);
1822 }
1823 
1824 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1825 {
1826 	struct trace_kprobe *tk;
1827 
1828 	tk = trace_kprobe_primary_from_call(event_call);
1829 	if (unlikely(!tk))
1830 		return;
1831 
1832 	if (trace_probe_is_enabled(&tk->tp)) {
1833 		WARN_ON(1);
1834 		return;
1835 	}
1836 
1837 	__unregister_trace_kprobe(tk);
1838 
1839 	free_trace_kprobe(tk);
1840 }
1841 #endif /* CONFIG_PERF_EVENTS */
1842 
1843 static __init void enable_boot_kprobe_events(void)
1844 {
1845 	struct trace_array *tr = top_trace_array();
1846 	struct trace_event_file *file;
1847 	struct trace_kprobe *tk;
1848 	struct dyn_event *pos;
1849 
1850 	mutex_lock(&event_mutex);
1851 	for_each_trace_kprobe(tk, pos) {
1852 		list_for_each_entry(file, &tr->events, list)
1853 			if (file->event_call == trace_probe_event_call(&tk->tp))
1854 				trace_event_enable_disable(file, 1, 0);
1855 	}
1856 	mutex_unlock(&event_mutex);
1857 }
1858 
1859 static __init void setup_boot_kprobe_events(void)
1860 {
1861 	char *p, *cmd = kprobe_boot_events_buf;
1862 	int ret;
1863 
1864 	strreplace(kprobe_boot_events_buf, ',', ' ');
1865 
1866 	while (cmd && *cmd != '\0') {
1867 		p = strchr(cmd, ';');
1868 		if (p)
1869 			*p++ = '\0';
1870 
1871 		ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1872 		if (ret)
1873 			pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1874 		else
1875 			kprobe_boot_events_enabled = true;
1876 
1877 		cmd = p;
1878 	}
1879 
1880 	enable_boot_kprobe_events();
1881 }
1882 
1883 /*
1884  * Register dynevent at subsys_initcall. This allows the kernel to set up kprobe
1885  * events in fs_initcall without tracefs.
1886  */
1887 static __init int init_kprobe_trace_early(void)
1888 {
1889 	int ret;
1890 
1891 	ret = dyn_event_register(&trace_kprobe_ops);
1892 	if (ret)
1893 		return ret;
1894 
1895 	if (register_module_notifier(&trace_kprobe_module_nb))
1896 		return -EINVAL;
1897 
1898 	return 0;
1899 }
1900 subsys_initcall(init_kprobe_trace_early);
1901 
1902 /* Make a tracefs interface for controlling probe points */
1903 static __init int init_kprobe_trace(void)
1904 {
1905 	struct dentry *d_tracer;
1906 	struct dentry *entry;
1907 
1908 	d_tracer = tracing_init_dentry();
1909 	if (IS_ERR(d_tracer))
1910 		return 0;
1911 
1912 	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1913 				    NULL, &kprobe_events_ops);
1914 
1915 	/* Event list interface */
1916 	if (!entry)
1917 		pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1918 
1919 	/* Profile interface */
1920 	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1921 				    NULL, &kprobe_profile_ops);
1922 
1923 	if (!entry)
1924 		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1925 
1926 	setup_boot_kprobe_events();
1927 
1928 	return 0;
1929 }
1930 fs_initcall(init_kprobe_trace);
1931 
1932 
1933 #ifdef CONFIG_FTRACE_STARTUP_TEST
1934 static __init struct trace_event_file *
1935 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1936 {
1937 	struct trace_event_file *file;
1938 
1939 	list_for_each_entry(file, &tr->events, list)
1940 		if (file->event_call == trace_probe_event_call(&tk->tp))
1941 			return file;
1942 
1943 	return NULL;
1944 }
1945 
1946 /*
1947  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1948  * stage, so we can do this locklessly.
1949  */
1950 static __init int kprobe_trace_self_tests_init(void)
1951 {
1952 	int ret, warn = 0;
1953 	int (*target)(int, int, int, int, int, int);
1954 	struct trace_kprobe *tk;
1955 	struct trace_event_file *file;
1956 
1957 	if (tracing_is_disabled())
1958 		return -ENODEV;
1959 
1960 	if (kprobe_boot_events_enabled) {
1961 		pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1962 		return 0;
1963 	}
1964 
1965 	target = kprobe_trace_selftest_target;
1966 
1967 	pr_info("Testing kprobe tracing: ");
1968 
1969 	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1970 				create_or_delete_trace_kprobe);
1971 	if (WARN_ON_ONCE(ret)) {
1972 		pr_warn("error on probing function entry.\n");
1973 		warn++;
1974 	} else {
1975 		/* Enable trace point */
1976 		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1977 		if (WARN_ON_ONCE(tk == NULL)) {
1978 			pr_warn("error on getting new probe.\n");
1979 			warn++;
1980 		} else {
1981 			file = find_trace_probe_file(tk, top_trace_array());
1982 			if (WARN_ON_ONCE(file == NULL)) {
1983 				pr_warn("error on getting probe file.\n");
1984 				warn++;
1985 			} else
1986 				enable_trace_kprobe(
1987 					trace_probe_event_call(&tk->tp), file);
1988 		}
1989 	}
1990 
1991 	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1992 				create_or_delete_trace_kprobe);
1993 	if (WARN_ON_ONCE(ret)) {
1994 		pr_warn("error on probing function return.\n");
1995 		warn++;
1996 	} else {
1997 		/* Enable trace point */
1998 		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1999 		if (WARN_ON_ONCE(tk == NULL)) {
2000 			pr_warn("error on getting 2nd new probe.\n");
2001 			warn++;
2002 		} else {
2003 			file = find_trace_probe_file(tk, top_trace_array());
2004 			if (WARN_ON_ONCE(file == NULL)) {
2005 				pr_warn("error on getting probe file.\n");
2006 				warn++;
2007 			} else
2008 				enable_trace_kprobe(
2009 					trace_probe_event_call(&tk->tp), file);
2010 		}
2011 	}
2012 
2013 	if (warn)
2014 		goto end;
2015 
2016 	ret = target(1, 2, 3, 4, 5, 6);
2017 
2018 	/*
2019 	 * Not expecting an error here; the check is only to prevent the
2020 	 * optimizer from removing the call to target() as otherwise there
2021 	 * are no side-effects and the call is never performed.
2022 	 */
2023 	if (ret != 21)
2024 		warn++;
2025 
2026 	/* Disable trace points before removing it */
2027 	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2028 	if (WARN_ON_ONCE(tk == NULL)) {
2029 		pr_warn("error on getting test probe.\n");
2030 		warn++;
2031 	} else {
2032 		if (trace_kprobe_nhit(tk) != 1) {
2033 			pr_warn("incorrect number of testprobe hits\n");
2034 			warn++;
2035 		}
2036 
2037 		file = find_trace_probe_file(tk, top_trace_array());
2038 		if (WARN_ON_ONCE(file == NULL)) {
2039 			pr_warn("error on getting probe file.\n");
2040 			warn++;
2041 		} else
2042 			disable_trace_kprobe(
2043 				trace_probe_event_call(&tk->tp), file);
2044 	}
2045 
2046 	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2047 	if (WARN_ON_ONCE(tk == NULL)) {
2048 		pr_warn("error on getting 2nd test probe.\n");
2049 		warn++;
2050 	} else {
2051 		if (trace_kprobe_nhit(tk) != 1) {
2052 			pr_warn("incorrect number of testprobe2 hits\n");
2053 			warn++;
2054 		}
2055 
2056 		file = find_trace_probe_file(tk, top_trace_array());
2057 		if (WARN_ON_ONCE(file == NULL)) {
2058 			pr_warn("error on getting probe file.\n");
2059 			warn++;
2060 		} else
2061 			disable_trace_kprobe(
2062 				trace_probe_event_call(&tk->tp), file);
2063 	}
2064 
2065 	ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
2066 	if (WARN_ON_ONCE(ret)) {
2067 		pr_warn("error on deleting a probe.\n");
2068 		warn++;
2069 	}
2070 
2071 	ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
2072 	if (WARN_ON_ONCE(ret)) {
2073 		pr_warn("error on deleting a probe.\n");
2074 		warn++;
2075 	}
2076 
2077 end:
2078 	ret = dyn_events_release_all(&trace_kprobe_ops);
2079 	if (WARN_ON_ONCE(ret)) {
2080 		pr_warn("error on cleaning up probes.\n");
2081 		warn++;
2082 	}
2083 	/*
2084 	 * Wait for the optimizer work to finish. Otherwise it might fiddle
2085 	 * with probes in already freed __init text.
2086 	 */
2087 	wait_for_kprobe_optimizer();
2088 	if (warn)
2089 		pr_cont("NG: Some tests failed. Please check them.\n");
2090 	else
2091 		pr_cont("OK\n");
2092 	return 0;
2093 }
2094 
2095 late_initcall(kprobe_trace_self_tests_init);
2096 
2097 #endif
2098