xref: /openbmc/linux/kernel/trace/trace_eprobe.c (revision 78091edc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * event probes
4  *
5  * Part of this code was copied from kernel/trace/trace_kprobe.c written by
6  * Masami Hiramatsu <mhiramat@kernel.org>
7  *
8  * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
9  * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
10  *
11  */
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/ftrace.h>
15 
16 #include "trace_dynevent.h"
17 #include "trace_probe.h"
18 #include "trace_probe_tmpl.h"
19 
20 #define EPROBE_EVENT_SYSTEM "eprobes"
21 
/*
 * An event probe: a dynamic event attached to an existing trace event,
 * copying selected fields of that event into its own entries.
 */
struct trace_eprobe {
	/* tracepoint system */
	const char *event_system;

	/* tracepoint event */
	const char *event_name;

	/* the event this probe is attached to (a reference is held) */
	struct trace_event_call *event;

	struct dyn_event	devent;
	struct trace_probe	tp;
};
34 
/* Per-attachment data handed to the trigger via trigger->private_data */
struct eprobe_data {
	struct trace_event_file	*file;
	struct trace_eprobe	*ep;
};
39 
40 static int __trace_eprobe_create(int argc, const char *argv[]);
41 
42 static void trace_event_probe_cleanup(struct trace_eprobe *ep)
43 {
44 	if (!ep)
45 		return;
46 	trace_probe_cleanup(&ep->tp);
47 	kfree(ep->event_name);
48 	kfree(ep->event_system);
49 	if (ep->event)
50 		trace_event_put_ref(ep->event);
51 	kfree(ep);
52 }
53 
54 static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
55 {
56 	return container_of(ev, struct trace_eprobe, devent);
57 }
58 
59 static int eprobe_dyn_event_create(const char *raw_command)
60 {
61 	return trace_probe_create(raw_command, __trace_eprobe_create);
62 }
63 
64 static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
65 {
66 	struct trace_eprobe *ep = to_trace_eprobe(ev);
67 	int i;
68 
69 	seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
70 				trace_probe_name(&ep->tp));
71 	seq_printf(m, " %s.%s", ep->event_system, ep->event_name);
72 
73 	for (i = 0; i < ep->tp.nr_args; i++)
74 		seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
75 	seq_putc(m, '\n');
76 
77 	return 0;
78 }
79 
/*
 * Remove @ep from the dynamic-event list and, when it is the last probe
 * on its trace_probe event, unregister the event call too.  Returns
 * -EBUSY when the event is enabled or still referenced by ftrace/perf.
 */
static int unregister_trace_eprobe(struct trace_eprobe *ep)
{
	/* If other probes are on the event, just unregister eprobe */
	if (trace_probe_has_sibling(&ep->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&ep->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (trace_probe_unregister_event_call(&ep->tp))
		return -EBUSY;

unreg:
	dyn_event_remove(&ep->devent);
	trace_probe_unlink(&ep->tp);

	return 0;
}
100 
/* dyn_event .free hook: unregister the probe and free it on success. */
static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int ret;

	ret = unregister_trace_eprobe(ep);
	if (ret == 0)
		trace_event_probe_cleanup(ep);
	return ret;
}
110 
111 static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
112 {
113 	struct trace_eprobe *ep = to_trace_eprobe(ev);
114 
115 	return trace_probe_is_enabled(&ep->tp);
116 }
117 
118 static bool eprobe_dyn_event_match(const char *system, const char *event,
119 			int argc, const char **argv, struct dyn_event *ev)
120 {
121 	struct trace_eprobe *ep = to_trace_eprobe(ev);
122 	const char *slash;
123 
124 	/*
125 	 * We match the following:
126 	 *  event only			- match all eprobes with event name
127 	 *  system and event only	- match all system/event probes
128 	 *  system only			- match all system probes
129 	 *
130 	 * The below has the above satisfied with more arguments:
131 	 *
132 	 *  attached system/event	- If the arg has the system and event
133 	 *				  the probe is attached to, match
134 	 *				  probes with the attachment.
135 	 *
136 	 *  If any more args are given, then it requires a full match.
137 	 */
138 
139 	/*
140 	 * If system exists, but this probe is not part of that system
141 	 * do not match.
142 	 */
143 	if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
144 		return false;
145 
146 	/* Must match the event name */
147 	if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)
148 		return false;
149 
150 	/* No arguments match all */
151 	if (argc < 1)
152 		return true;
153 
154 	/* First argument is the system/event the probe is attached to */
155 
156 	slash = strchr(argv[0], '/');
157 	if (!slash)
158 		slash = strchr(argv[0], '.');
159 	if (!slash)
160 		return false;
161 
162 	if (strncmp(ep->event_system, argv[0], slash - argv[0]))
163 		return false;
164 	if (strcmp(ep->event_name, slash + 1))
165 		return false;
166 
167 	argc--;
168 	argv++;
169 
170 	/* If there are no other args, then match */
171 	if (argc < 1)
172 		return true;
173 
174 	return trace_probe_match_command_args(&ep->tp, argc, argv);
175 }
176 
/* Hooks wiring eprobes into the generic dynamic-event interface */
static struct dyn_event_operations eprobe_dyn_event_ops = {
	.create = eprobe_dyn_event_create,
	.show = eprobe_dyn_event_show,
	.is_busy = eprobe_dyn_event_is_busy,
	.free = eprobe_dyn_event_release,
	.match = eprobe_dyn_event_match,
};
184 
/*
 * Allocate and initialize a trace_eprobe.
 *
 * @group/@this_event name the new probe event; @event is the event the
 * probe attaches to and @nargs sizes the trailing tp.args[] array.
 * The caller's reference on @event is taken over: on kzalloc failure it
 * is dropped here (cleanup cannot see it yet), otherwise it is released
 * later by trace_event_probe_cleanup().  Returns the probe or ERR_PTR().
 */
static struct trace_eprobe *alloc_event_probe(const char *group,
					      const char *this_event,
					      struct trace_event_call *event,
					      int nargs)
{
	struct trace_eprobe *ep;
	const char *event_name;
	const char *sys_name;
	int ret = -ENOMEM;

	if (!event)
		return ERR_PTR(-ENODEV);

	sys_name = event->class->system;
	event_name = trace_event_name(event);

	ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
	if (!ep) {
		/* ep->event was never set, so drop the reference ourselves */
		trace_event_put_ref(event);
		goto error;
	}
	ep->event = event;
	ep->event_name = kstrdup(event_name, GFP_KERNEL);
	if (!ep->event_name)
		goto error;
	ep->event_system = kstrdup(sys_name, GFP_KERNEL);
	if (!ep->event_system)
		goto error;

	ret = trace_probe_init(&ep->tp, this_event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
	return ep;
error:
	/* handles ep == NULL and partially-initialized ep alike */
	trace_event_probe_cleanup(ep);
	return ERR_PTR(ret);
}
224 
/*
 * Resolve probe argument @i: replace the parsed field-name string in
 * code->data with the matching ftrace_event_field of the attached
 * event.  "comm"/"COMM" (not a real field) is rewritten into a
 * FETCH_OP_COMM fetch of current->comm.  Returns -ENOENT when the name
 * matches nothing on the event.
 */
static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
{
	struct probe_arg *parg = &ep->tp.args[i];
	struct ftrace_event_field *field;
	struct list_head *head;
	int ret = -ENOENT;

	head = trace_get_fields(ep->event);
	list_for_each_entry(field, head, link) {
		if (!strcmp(parg->code->data, field->name)) {
			/* swap the name string for the field descriptor */
			kfree(parg->code->data);
			parg->code->data = field;
			return 0;
		}
	}

	/*
	 * Argument not found on event. But allow for comm and COMM
	 * to be used to get the current->comm.
	 */
	if (strcmp(parg->code->data, "COMM") == 0 ||
	    strcmp(parg->code->data, "comm") == 0) {
		parg->code->op = FETCH_OP_COMM;
		ret = 0;
	}

	/* the name string is no longer needed in either outcome */
	kfree(parg->code->data);
	parg->code->data = NULL;
	return ret;
}
255 
256 static int eprobe_event_define_fields(struct trace_event_call *event_call)
257 {
258 	struct eprobe_trace_entry_head field;
259 	struct trace_probe *tp;
260 
261 	tp = trace_probe_primary_from_call(event_call);
262 	if (WARN_ON_ONCE(!tp))
263 		return -ENOENT;
264 
265 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
266 }
267 
/* Fields come from the probe's parsed arguments, defined dynamically */
static struct trace_event_fields eprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = eprobe_event_define_fields },
	{}
};
273 
274 /* Event entry printers */
/*
 * Format one eprobe entry as "NAME: (system.event) arg=val ..." into
 * the iterator's seq buffer.
 */
static enum print_line_t
print_eprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct eprobe_trace_entry_head *field;
	struct trace_event_call *pevent;
	struct trace_event *probed_event;
	struct trace_seq *s = &iter->seq;
	struct trace_eprobe *ep;
	struct trace_probe *tp;
	unsigned int type;

	field = (struct eprobe_trace_entry_head *)iter->ent;
	/* @event is embedded in the eprobe's own trace_event_call */
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	ep = container_of(tp, struct trace_eprobe, tp);
	/* type id of the event the probe is attached to */
	type = ep->event->event.type;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	/* print "system.event" of the attached event, or its raw type id */
	probed_event = ftrace_find_event(type);
	if (probed_event) {
		pevent = container_of(probed_event, struct trace_event_call, event);
		trace_seq_printf(s, "%s.%s", pevent->class->system,
				 trace_event_name(pevent));
	} else {
		trace_seq_printf(s, "%u", type);
	}

	trace_seq_putc(s, ')');

	/* probe args are stored immediately after the entry header */
	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
317 
/*
 * Read the value of code->data (an ftrace_event_field) out of the
 * attached event's record @rec.  For string fields the returned value
 * is the address of the string bytes; for numeric fields it is the
 * value itself, sign-extended per the field's signedness.
 */
static unsigned long get_event_field(struct fetch_insn *code, void *rec)
{
	struct ftrace_event_field *field = code->data;
	unsigned long val;
	void *addr;

	addr = rec + field->offset;

	if (is_string_field(field)) {
		switch (field->filter_type) {
		case FILTER_DYN_STRING:
			/* low 16 bits of the __data_loc word = offset from rec */
			val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_RDYN_STRING:
			/* __rel_loc: offset is relative to the field itself */
			val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_STATIC_STRING:
			/* string bytes are stored in-line in the record */
			val = (unsigned long)addr;
			break;
		case FILTER_PTR_STRING:
			val = (unsigned long)(*(char *)addr);
			break;
		default:
			WARN_ON_ONCE(1);
			return 0;
		}
		return val;
	}

	/* numeric field: load with the field's size and signedness */
	switch (field->size) {
	case 1:
		if (field->is_signed)
			val = *(char *)addr;
		else
			val = *(unsigned char *)addr;
		break;
	case 2:
		if (field->is_signed)
			val = *(short *)addr;
		else
			val = *(unsigned short *)addr;
		break;
	case 4:
		if (field->is_signed)
			val = *(int *)addr;
		else
			val = *(unsigned int *)addr;
		break;
	default:
		/* 8-byte (or unexpected-size) fields take the long path */
		if (field->is_signed)
			val = *(long *)addr;
		else
			val = *(unsigned long *)addr;
		break;
	}
	return val;
}
375 
/*
 * Compute how many extra bytes of dynamic data (strings etc.) the probe
 * will store for source record @rec, so the ring-buffer event can be
 * sized before being reserved.
 */
static int get_eprobe_size(struct trace_probe *tp, void *rec)
{
	struct fetch_insn *code;
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (arg->dynamic) {
			unsigned long val;

			code = arg->code;
 retry:
			switch (code->op) {
			case FETCH_OP_TP_ARG:
				val = get_event_field(code, rec);
				break;
			case FETCH_OP_IMM:
				val = code->immediate;
				break;
			case FETCH_OP_COMM:
				val = (unsigned long)current->comm;
				break;
			case FETCH_OP_DATA:
				val = (unsigned long)code->data;
				break;
			case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
				code++;
				goto retry;
			default:
				continue;
			}
			code++;
			/* dest == NULL: measure the size only, store nothing */
			len = process_fetch_insn_bottom(code, val, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}
417 
418 /* Kprobe specific fetch functions */
419 
420 /* Note that we don't verify it, since the code does not come from user space */
/*
 * Execute the first fetch instruction of an argument against the
 * attached event's record @rec, then hand off to the common
 * process_fetch_insn_bottom() for dereference/store handling.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	unsigned long val;

 retry:
	switch (code->op) {
	case FETCH_OP_TP_ARG:
		val = get_event_field(code, rec);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
451 
452 /* Return the length of string -- including null terminal byte */
453 static nokprobe_inline int
454 fetch_store_strlen_user(unsigned long addr)
455 {
456 	const void __user *uaddr =  (__force const void __user *)addr;
457 
458 	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
459 }
460 
461 /* Return the length of string -- including null terminal byte */
462 static nokprobe_inline int
463 fetch_store_strlen(unsigned long addr)
464 {
465 	int ret, len = 0;
466 	u8 c;
467 
468 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
469 	if (addr < TASK_SIZE)
470 		return fetch_store_strlen_user(addr);
471 #endif
472 
473 	do {
474 		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
475 		len++;
476 	} while (c && ret == 0 && len < MAX_STRING_SIZE);
477 
478 	return (ret < 0) ? ret : len;
479 }
480 
481 /*
482  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
483  * with max length and relative data location.
484  */
485 static nokprobe_inline int
486 fetch_store_string_user(unsigned long addr, void *dest, void *base)
487 {
488 	const void __user *uaddr =  (__force const void __user *)addr;
489 	int maxlen = get_loc_len(*(u32 *)dest);
490 	void *__dest;
491 	long ret;
492 
493 	if (unlikely(!maxlen))
494 		return -ENOMEM;
495 
496 	__dest = get_loc_data(dest, base);
497 
498 	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
499 	if (ret >= 0)
500 		*(u32 *)dest = make_data_loc(ret, __dest - base);
501 
502 	return ret;
503 }
504 
505 /*
506  * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
507  * length and relative data location.
508  */
/*
 * Copy a NUL-terminated kernel string into the data-loc slot at @dest.
 * Caller MUST pre-set *(u32 *)dest with max length and relative data
 * location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	/* low addresses can only be user space on these archs */
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
536 
537 static nokprobe_inline int
538 probe_mem_read_user(void *dest, void *src, size_t size)
539 {
540 	const void __user *uaddr =  (__force const void __user *)src;
541 
542 	return copy_from_user_nofault(dest, uaddr, size);
543 }
544 
/*
 * Fault-safe read of @size bytes from @src; dispatches to the user-space
 * reader when the address can only be user space.
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}
554 
555 /* eprobe handler */
/*
 * Record one hit of the eprobe: size the entry from the source event's
 * record @rec, reserve ring-buffer space on the eprobe's file, and copy
 * the fetched arguments in.
 */
static inline void
__eprobe_trace_func(struct eprobe_data *edata, void *rec)
{
	struct eprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != edata->file->event_call))
		return;

	if (trace_trigger_soft_disabled(edata->file))
		return;

	/* extra bytes needed for dynamic (string) arguments */
	dsize = get_eprobe_size(&edata->ep->tp, rec);

	entry = trace_event_buffer_reserve(&fbuffer, edata->file,
					   sizeof(*entry) + edata->ep->tp.size + dsize);

	if (!entry)
		return;

	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	/* args are stored right behind the entry header */
	store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
583 
584 /*
585  * The event probe implementation uses event triggers to get access to
586  * the event it is attached to, but is not an actual trigger. The below
587  * functions are just stubs to fulfill what is needed to use the trigger
588  * infrastructure.
589  */
/* Stub: nothing to set up for the internal eprobe trigger. */
static int eprobe_trigger_init(struct event_trigger_data *data)
{
	return 0;
}
594 
/* Stub: trigger memory is owned and freed by disable_eprobe(). */
static void eprobe_trigger_free(struct event_trigger_data *data)
{

}
599 
/* Stub: eprobe triggers are invisible in the trigger file. */
static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}
606 
607 static void eprobe_trigger_func(struct event_trigger_data *data,
608 				struct trace_buffer *buffer, void *rec,
609 				struct ring_buffer_event *rbe)
610 {
611 	struct eprobe_data *edata = data->private_data;
612 
613 	__eprobe_trace_func(edata, rec);
614 }
615 
/* Ops for the internal trigger; only .trigger does real work. */
static struct event_trigger_ops eprobe_trigger_ops = {
	.trigger		= eprobe_trigger_func,
	.print			= eprobe_trigger_print,
	.init			= eprobe_trigger_init,
	.free			= eprobe_trigger_free,
};
622 
/* Unreachable stub: the eprobe command is never user-parsable. */
static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter)
{
	return -1;
}
630 
/* Unreachable stub: eprobe triggers are registered internally only. */
static int eprobe_trigger_reg_func(char *glob,
				   struct event_trigger_data *data,
				   struct trace_event_file *file)
{
	return -1;
}
637 
/* Unreachable stub: unregistration happens via disable_eprobe(). */
static void eprobe_trigger_unreg_func(char *glob,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{

}
644 
/* All eprobe triggers share the one static ops table. */
static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
							char *param)
{
	return &eprobe_trigger_ops;
}
650 
/*
 * Internal-only command backing the eprobe trigger; it is never
 * registered with register_event_command(), so parse/reg/unreg are
 * unreachable stubs.
 */
static struct event_command event_trigger_cmd = {
	.name			= "eprobe",
	.trigger_type		= ETT_EVENT_EPROBE,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= eprobe_trigger_cmd_parse,
	.reg			= eprobe_trigger_reg_func,
	.unreg			= eprobe_trigger_unreg_func,
	.unreg_all		= NULL,
	.get_trigger_ops	= eprobe_trigger_get_ops,
	.set_filter		= NULL,
};
662 
663 static struct event_trigger_data *
664 new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
665 {
666 	struct event_trigger_data *trigger;
667 	struct eprobe_data *edata;
668 
669 	edata = kzalloc(sizeof(*edata), GFP_KERNEL);
670 	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
671 	if (!trigger || !edata) {
672 		kfree(edata);
673 		kfree(trigger);
674 		return ERR_PTR(-ENOMEM);
675 	}
676 
677 	trigger->flags = EVENT_TRIGGER_FL_PROBE;
678 	trigger->count = -1;
679 	trigger->ops = &eprobe_trigger_ops;
680 
681 	/*
682 	 * EVENT PROBE triggers are not registered as commands with
683 	 * register_event_command(), as they are not controlled by the user
684 	 * from the trigger file
685 	 */
686 	trigger->cmd_ops = &event_trigger_cmd;
687 
688 	INIT_LIST_HEAD(&trigger->list);
689 	RCU_INIT_POINTER(trigger->filter, NULL);
690 
691 	edata->file = file;
692 	edata->ep = ep;
693 	trigger->private_data = edata;
694 
695 	return trigger;
696 }
697 
/*
 * Attach @ep in trace array @eprobe_file->tr by linking a probe trigger
 * onto the target event's trigger list and enabling that event.
 */
static int enable_eprobe(struct trace_eprobe *ep,
			 struct trace_event_file *eprobe_file)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct trace_array *tr = eprobe_file->tr;

	/* event file of the event the probe is attached to */
	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;
	trigger = new_eprobe_trigger(ep, eprobe_file);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	list_add_tail_rcu(&trigger->list, &file->triggers);

	/* enable the target event and refresh its trigger state */
	trace_event_trigger_enable_disable(file, 1);
	update_cond_flag(file);

	return 0;
}
719 
/* Output formatting callbacks for eprobe entries */
static struct trace_event_functions eprobe_funcs = {
	.trace		= print_eprobe_event
};
723 
/*
 * Detach @ep in @tr: find the probe trigger added by enable_eprobe(),
 * unlink it from the target event and free it once nothing can still
 * be using it.
 */
static int disable_eprobe(struct trace_eprobe *ep,
			  struct trace_array *tr)
{
	struct event_trigger_data *trigger = NULL, *iter;
	struct trace_event_file *file;
	struct eprobe_data *edata;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;

	/* look for the trigger whose private data points back to @ep */
	list_for_each_entry(iter, &file->triggers, list) {
		if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
			continue;
		edata = iter->private_data;
		if (edata->ep == ep) {
			trigger = iter;
			break;
		}
	}
	if (!trigger)
		return -ENODEV;

	list_del_rcu(&trigger->list);

	trace_event_trigger_enable_disable(file, 0);
	update_cond_flag(file);

	/* Make sure nothing is using the edata or trigger */
	tracepoint_synchronize_unregister();

	kfree(edata);
	kfree(trigger);

	return 0;
}
760 
761 static int enable_trace_eprobe(struct trace_event_call *call,
762 			       struct trace_event_file *file)
763 {
764 	struct trace_probe *pos, *tp;
765 	struct trace_eprobe *ep;
766 	bool enabled;
767 	int ret = 0;
768 
769 	tp = trace_probe_primary_from_call(call);
770 	if (WARN_ON_ONCE(!tp))
771 		return -ENODEV;
772 	enabled = trace_probe_is_enabled(tp);
773 
774 	/* This also changes "enabled" state */
775 	if (file) {
776 		ret = trace_probe_add_file(tp, file);
777 		if (ret)
778 			return ret;
779 	} else
780 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
781 
782 	if (enabled)
783 		return 0;
784 
785 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
786 		ep = container_of(pos, struct trace_eprobe, tp);
787 		ret = enable_eprobe(ep, file);
788 		if (ret)
789 			break;
790 		enabled = true;
791 	}
792 
793 	if (ret) {
794 		/* Failed to enable one of them. Roll back all */
795 		if (enabled)
796 			disable_eprobe(ep, file->tr);
797 		if (file)
798 			trace_probe_remove_file(tp, file);
799 		else
800 			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
801 	}
802 
803 	return ret;
804 }
805 
/*
 * TRACE_REG_UNREGISTER handler: drop @file from the probe's file list
 * (or clear the perf flag when @file is NULL) and, once nothing uses
 * the probe any more, detach every sibling eprobe from its target.
 */
static int disable_trace_eprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_eprobe *ep;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* other files still reference the probe: keep it attached */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		/*
		 * NOTE(review): file->tr below assumes file != NULL; it
		 * looks like eprobes are only ever disabled through the
		 * ftrace path (file set) -- confirm the perf (file == NULL)
		 * path can never reach here.
		 */
		list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
			ep = container_of(pos, struct trace_eprobe, tp);
			disable_eprobe(ep, file->tr);
		}
	}

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
844 
845 static int eprobe_register(struct trace_event_call *event,
846 			   enum trace_reg type, void *data)
847 {
848 	struct trace_event_file *file = data;
849 
850 	switch (type) {
851 	case TRACE_REG_REGISTER:
852 		return enable_trace_eprobe(event, file);
853 	case TRACE_REG_UNREGISTER:
854 		return disable_trace_eprobe(event, file);
855 #ifdef CONFIG_PERF_EVENTS
856 	case TRACE_REG_PERF_REGISTER:
857 	case TRACE_REG_PERF_UNREGISTER:
858 	case TRACE_REG_PERF_OPEN:
859 	case TRACE_REG_PERF_CLOSE:
860 	case TRACE_REG_PERF_ADD:
861 	case TRACE_REG_PERF_DEL:
862 		return 0;
863 #endif
864 	}
865 	return 0;
866 }
867 
868 static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
869 {
870 	struct trace_event_call *call = trace_probe_event_call(&ep->tp);
871 
872 	call->flags = TRACE_EVENT_FL_EPROBE;
873 	call->event.funcs = &eprobe_funcs;
874 	call->class->fields_array = eprobe_fields_array;
875 	call->class->reg = eprobe_register;
876 }
877 
878 static struct trace_event_call *
879 find_and_get_event(const char *system, const char *event_name)
880 {
881 	struct trace_event_call *tp_event;
882 	const char *name;
883 
884 	list_for_each_entry(tp_event, &ftrace_events, list) {
885 		/* Skip other probes and ftrace events */
886 		if (tp_event->flags &
887 		    (TRACE_EVENT_FL_IGNORE_ENABLE |
888 		     TRACE_EVENT_FL_KPROBE |
889 		     TRACE_EVENT_FL_UPROBE |
890 		     TRACE_EVENT_FL_EPROBE))
891 			continue;
892 		if (!tp_event->class->system ||
893 		    strcmp(system, tp_event->class->system))
894 			continue;
895 		name = trace_event_name(tp_event);
896 		if (!name || strcmp(event_name, name))
897 			continue;
898 		if (!trace_event_try_get_ref(tp_event)) {
899 			return NULL;
900 			break;
901 		}
902 		return tp_event;
903 		break;
904 	}
905 	return NULL;
906 }
907 
/*
 * Parse fetch-arg string argv[i] into probe argument @i and bind any
 * $<field> reference (FETCH_OP_TP_ARG) to the attached event's field.
 */
static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
{
	unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT;
	int ret;

	ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags);
	if (ret)
		return ret;

	if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG) {
		ret = trace_eprobe_tp_arg_update(ep, i);
		if (ret)
			trace_probe_log_err(0, BAD_ATTACH_ARG);
	}

	/* Handle symbols "@" */
	if (!ret)
		ret = traceprobe_update_arg(&ep->tp.args[i]);

	return ret;
}
929 
/*
 * Parse an "e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS]" command, build
 * the eprobe, and register it as both a trace event and a dyn_event.
 * Returns 0 on success or a negative error (with the probe log set).
 */
static int __trace_eprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *      e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS]
	 * Fetch args:
	 *  <name>=$<field>[:TYPE]
	 */
	const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
	const char *sys_event = NULL, *sys_name = NULL;
	struct trace_event_call *event_call;
	struct trace_eprobe *ep = NULL;
	char buf1[MAX_EVENT_NAME_LEN];
	char buf2[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	int ret = 0;
	int i;

	if (argc < 2 || argv[0][0] != 'e')
		return -ECANCELED;

	trace_probe_log_init("event_probe", argc, argv);

	/* optional ":GRP/ENAME" naming of the probe itself */
	event = strchr(&argv[0][1], ':');
	if (event) {
		event++;
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	/* argv[1] is the SYSTEM.EVENT the probe attaches to */
	trace_probe_log_set_index(1);
	sys_event = argv[1];
	ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
	if (ret || !sys_event || !sys_name) {
		trace_probe_log_err(0, NO_EVENT_INFO);
		goto parse_error;
	}

	/* default the probe name to a sanitized copy of SYSTEM.EVENT */
	if (!event) {
		strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
		sanitize_event_name(buf1);
		event = buf1;
	}

	mutex_lock(&event_mutex);
	/* takes a reference on the found event; owned by ep afterwards */
	event_call = find_and_get_event(sys_name, sys_event);
	ep = alloc_event_probe(group, event, event_call, argc - 2);
	mutex_unlock(&event_mutex);

	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		if (ret == -ENODEV)
			trace_probe_log_err(0, BAD_ATTACH_EVENT);
		/* This must return -ENOMEM or missing event, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
		ep = NULL;
		goto error;
	}

	argc -= 2; argv += 2;
	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = trace_eprobe_tp_update_arg(ep, argv, i);
		if (ret)
			goto error;
	}
	ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
	if (ret < 0)
		goto error;
	init_trace_eprobe_call(ep);
	mutex_lock(&event_mutex);
	ret = trace_probe_register_event_call(&ep->tp);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		}
		mutex_unlock(&event_mutex);
		goto error;
	}
	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
	mutex_unlock(&event_mutex);
	return ret;
parse_error:
	ret = -EINVAL;
error:
	/* frees ep (may be NULL) and drops the event reference it holds */
	trace_event_probe_cleanup(ep);
	return ret;
}
1022 
1023 /*
1024  * Register dynevent at core_initcall. This allows kernel to setup eprobe
1025  * events in postcore_initcall without tracefs.
1026  */
1027 static __init int trace_events_eprobe_init_early(void)
1028 {
1029 	int err = 0;
1030 
1031 	err = dyn_event_register(&eprobe_dyn_event_ops);
1032 	if (err)
1033 		pr_warn("Could not register eprobe_dyn_event_ops\n");
1034 
1035 	return err;
1036 }
1037 core_initcall(trace_events_eprobe_init_early);
1038