xref: /openbmc/linux/kernel/trace/trace_eprobe.c (revision 1504b6f9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * event probes
4  *
5  * Part of this code was copied from kernel/trace/trace_kprobe.c written by
6  * Masami Hiramatsu <mhiramat@kernel.org>
7  *
8  * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
 * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
10  *
11  */
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/ftrace.h>
15 
16 #include "trace_dynevent.h"
17 #include "trace_probe.h"
18 #include "trace_probe_tmpl.h"
19 #include "trace_probe_kernel.h"
20 
21 #define EPROBE_EVENT_SYSTEM "eprobes"
22 
/*
 * An event probe (eprobe): a dynamic event attached to an existing
 * trace event, reading that event's fields as its own arguments.
 */
struct trace_eprobe {
	/* tracepoint system */
	const char *event_system;

	/* tracepoint event */
	const char *event_name;

	/* filter string for the tracepoint */
	char *filter_str;

	/* the attached event; a reference is held (see alloc/cleanup) */
	struct trace_event_call *event;

	struct dyn_event	devent;	/* dynamic-event list linkage */
	struct trace_probe	tp;	/* must be last: args[] is a flexible array */
};
38 
/* Ties an eprobe to its output file; stored as trigger private_data */
struct eprobe_data {
	struct trace_event_file	*file;	/* file the eprobe event is written to */
	struct trace_eprobe	*ep;	/* owning eprobe */
};
43 
44 static int __trace_eprobe_create(int argc, const char *argv[]);
45 
/*
 * Free an eprobe and everything it owns: probe data, the duplicated
 * system/event name strings, and the reference held on the attached
 * event. A NULL @ep is a no-op.
 */
static void trace_event_probe_cleanup(struct trace_eprobe *ep)
{
	if (!ep)
		return;
	trace_probe_cleanup(&ep->tp);
	kfree(ep->event_name);
	kfree(ep->event_system);
	if (ep->event)
		trace_event_put_ref(ep->event);
	kfree(ep);
}
57 
/* Map a dyn_event back to its containing trace_eprobe */
static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_eprobe, devent);
}
62 
/* dyn_event_operations::create - parse a command string into an eprobe */
static int eprobe_dyn_event_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_eprobe_create);
}
67 
68 static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
69 {
70 	struct trace_eprobe *ep = to_trace_eprobe(ev);
71 	int i;
72 
73 	seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
74 				trace_probe_name(&ep->tp));
75 	seq_printf(m, " %s.%s", ep->event_system, ep->event_name);
76 
77 	for (i = 0; i < ep->tp.nr_args; i++)
78 		seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
79 	seq_putc(m, '\n');
80 
81 	return 0;
82 }
83 
/*
 * Remove an eprobe from the dynamic event list. Returns -EBUSY if the
 * event is enabled or its call is still referenced by ftrace/perf. The
 * shared event call is only unregistered when this is the last sibling
 * probe using it.
 */
static int unregister_trace_eprobe(struct trace_eprobe *ep)
{
	/* If other probes are on the event, just unregister eprobe */
	if (trace_probe_has_sibling(&ep->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&ep->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (trace_probe_unregister_event_call(&ep->tp))
		return -EBUSY;

unreg:
	dyn_event_remove(&ep->devent);
	trace_probe_unlink(&ep->tp);

	return 0;
}
104 
/*
 * dyn_event_operations::free - unregister the eprobe and, on success,
 * free it. On failure (-EBUSY) the eprobe is left intact.
 */
static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int ret;

	ret = unregister_trace_eprobe(ep);
	if (ret == 0)
		trace_event_probe_cleanup(ep);
	return ret;
}
114 
115 static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
116 {
117 	struct trace_eprobe *ep = to_trace_eprobe(ev);
118 
119 	return trace_probe_is_enabled(&ep->tp);
120 }
121 
/* dyn_event_operations::match - does this eprobe match the given selector? */
static bool eprobe_dyn_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	const char *slash;

	/*
	 * We match the following:
	 *  event only			- match all eprobes with event name
	 *  system and event only	- match all system/event probes
	 *  system only			- match all system probes
	 *
	 * The below has the above satisfied with more arguments:
	 *
	 *  attached system/event	- If the arg has the system and event
	 *				  the probe is attached to, match
	 *				  probes with the attachment.
	 *
	 *  If any more args are given, then it requires a full match.
	 */

	/*
	 * If system exists, but this probe is not part of that system
	 * do not match.
	 */
	if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
		return false;

	/* Must match the event name */
	if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)
		return false;

	/* No arguments match all */
	if (argc < 1)
		return true;

	/* First argument is the system/event the probe is attached to */

	/* Accept either "system/event" or "system.event" as the separator */
	slash = strchr(argv[0], '/');
	if (!slash)
		slash = strchr(argv[0], '.');
	if (!slash)
		return false;

	/* Compare the system part up to the separator, then the event part */
	if (strncmp(ep->event_system, argv[0], slash - argv[0]))
		return false;
	if (strcmp(ep->event_name, slash + 1))
		return false;

	argc--;
	argv++;

	/* If there are no other args, then match */
	if (argc < 1)
		return true;

	/* Remaining args must match the probe's fetch arguments exactly */
	return trace_probe_match_command_args(&ep->tp, argc, argv);
}
180 
/* Hooks exposing eprobes through the dynamic_events interface */
static struct dyn_event_operations eprobe_dyn_event_ops = {
	.create = eprobe_dyn_event_create,
	.show = eprobe_dyn_event_show,
	.is_busy = eprobe_dyn_event_is_busy,
	.free = eprobe_dyn_event_release,
	.match = eprobe_dyn_event_match,
};
188 
/*
 * Allocate and initialize a trace_eprobe named @group/@this_event,
 * attached to @event. @nargs sizes the trailing tp.args[] array.
 *
 * Takes ownership of the reference the caller holds on @event: on
 * success it is kept in ep->event, on failure it is dropped here.
 * Returns the eprobe or an ERR_PTR (-ENODEV if @event is NULL,
 * otherwise -ENOMEM or a trace_probe_init() error).
 */
static struct trace_eprobe *alloc_event_probe(const char *group,
					      const char *this_event,
					      struct trace_event_call *event,
					      int nargs)
{
	struct trace_eprobe *ep;
	const char *event_name;
	const char *sys_name;
	int ret = -ENOMEM;

	if (!event)
		return ERR_PTR(-ENODEV);

	sys_name = event->class->system;
	event_name = trace_event_name(event);

	ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
	if (!ep) {
		/* ep is NULL, so cleanup below cannot drop the ref: do it here */
		trace_event_put_ref(event);
		goto error;
	}
	ep->event = event;
	ep->event_name = kstrdup(event_name, GFP_KERNEL);
	if (!ep->event_name)
		goto error;
	ep->event_system = kstrdup(sys_name, GFP_KERNEL);
	if (!ep->event_system)
		goto error;

	ret = trace_probe_init(&ep->tp, this_event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
	return ep;
error:
	trace_event_probe_cleanup(ep);
	return ERR_PTR(ret);
}
228 
/*
 * Resolve the "$field" reference of argument @i to the actual
 * ftrace_event_field of the attached event. The parsed field-name
 * string in parg->code->data is replaced by the field pointer.
 * "comm"/"COMM" are accepted even if the event has no such field and
 * fall back to fetching current->comm. Returns -ENOENT otherwise.
 */
static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
{
	struct probe_arg *parg = &ep->tp.args[i];
	struct ftrace_event_field *field;
	struct list_head *head;
	int ret = -ENOENT;

	head = trace_get_fields(ep->event);
	list_for_each_entry(field, head, link) {
		if (!strcmp(parg->code->data, field->name)) {
			/* Replace the name string with the resolved field */
			kfree(parg->code->data);
			parg->code->data = field;
			return 0;
		}
	}

	/*
	 * Argument not found on event. But allow for comm and COMM
	 * to be used to get the current->comm.
	 */
	if (strcmp(parg->code->data, "COMM") == 0 ||
	    strcmp(parg->code->data, "comm") == 0) {
		parg->code->op = FETCH_OP_COMM;
		ret = 0;
	}

	/* The name string is no longer needed in any outcome */
	kfree(parg->code->data);
	parg->code->data = NULL;
	return ret;
}
259 
260 static int eprobe_event_define_fields(struct trace_event_call *event_call)
261 {
262 	struct eprobe_trace_entry_head field;
263 	struct trace_probe *tp;
264 
265 	tp = trace_probe_primary_from_call(event_call);
266 	if (WARN_ON_ONCE(!tp))
267 		return -ENOENT;
268 
269 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
270 }
271 
/* Fields are built dynamically from the eprobe's arguments */
static struct trace_event_fields eprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = eprobe_event_define_fields },
	{}
};
277 
/* Event entry printers */
/*
 * Output one recorded eprobe entry as:
 *   "<name>: (<system>.<event>) arg1=... arg2=..."
 * falling back to the raw event type number if the attached event can
 * no longer be found.
 */
static enum print_line_t
print_eprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct eprobe_trace_entry_head *field;
	struct trace_event_call *pevent;
	struct trace_event *probed_event;
	struct trace_seq *s = &iter->seq;
	struct trace_eprobe *ep;
	struct trace_probe *tp;
	unsigned int type;

	field = (struct eprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	ep = container_of(tp, struct trace_eprobe, tp);
	/* type of the event this eprobe is attached to */
	type = ep->event->event.type;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	/* Resolve the attached event for "system.event"; else print raw type */
	probed_event = ftrace_find_event(type);
	if (probed_event) {
		pevent = container_of(probed_event, struct trace_event_call, event);
		trace_seq_printf(s, "%s.%s", pevent->class->system,
				 trace_event_name(pevent));
	} else {
		trace_seq_printf(s, "%u", type);
	}

	trace_seq_putc(s, ')');

	/* Probe argument data follows the entry header */
	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
321 
/*
 * Read one field value out of the probed event's record @rec.
 * code->data holds the ftrace_event_field resolved at parse time
 * (see trace_eprobe_tp_arg_update()). For string fields the returned
 * value is the address of the string data; for numeric fields it is
 * the (sign-extended, if signed) value itself.
 */
static unsigned long get_event_field(struct fetch_insn *code, void *rec)
{
	struct ftrace_event_field *field = code->data;
	unsigned long val;
	void *addr;

	addr = rec + field->offset;

	if (is_string_field(field)) {
		switch (field->filter_type) {
		case FILTER_DYN_STRING:
			/* low 16 bits of the field hold the offset from the record start */
			val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_RDYN_STRING:
			/* offset is relative to the field location itself */
			val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_STATIC_STRING:
			/* string is stored inline in the record */
			val = (unsigned long)addr;
			break;
		case FILTER_PTR_STRING:
			/* NOTE(review): reads a single byte at addr, not a pointer — confirm intended */
			val = (unsigned long)(*(char *)addr);
			break;
		default:
			WARN_ON_ONCE(1);
			return 0;
		}
		return val;
	}

	/* Numeric field: load by size, preserving signedness */
	switch (field->size) {
	case 1:
		if (field->is_signed)
			val = *(char *)addr;
		else
			val = *(unsigned char *)addr;
		break;
	case 2:
		if (field->is_signed)
			val = *(short *)addr;
		else
			val = *(unsigned short *)addr;
		break;
	case 4:
		if (field->is_signed)
			val = *(int *)addr;
		else
			val = *(unsigned int *)addr;
		break;
	default:
		if (field->is_signed)
			val = *(long *)addr;
		else
			val = *(unsigned long *)addr;
		break;
	}
	return val;
}
379 
/*
 * Compute the extra dynamic-data size (e.g. strings) needed to record
 * this eprobe's arguments for the given source record @rec.
 */
static int get_eprobe_size(struct trace_probe *tp, void *rec)
{
	struct fetch_insn *code;
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (arg->dynamic) {
			unsigned long val;

			code = arg->code;
 retry:
			switch (code->op) {
			case FETCH_OP_TP_ARG:
				val = get_event_field(code, rec);
				break;
			case FETCH_OP_IMM:
				val = code->immediate;
				break;
			case FETCH_OP_COMM:
				val = (unsigned long)current->comm;
				break;
			case FETCH_OP_DATA:
				val = (unsigned long)code->data;
				break;
			case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
				code++;
				goto retry;
			default:
				continue;
			}
			code++;
			/* NULL dest: only measure the length, store nothing */
			len = process_fetch_insn_bottom(code, val, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}
421 
/* Kprobe specific fetch functions */

/* Note that we don't verify it, since the code does not come from user space */
/*
 * Execute the first fetch instruction of an argument against the
 * probed event record @rec, then hand the fetched value to the common
 * bottom-half to apply dereferences/modifiers and store into @dest.
 * Returns -EILSEQ on an unexpected leading opcode.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	unsigned long val;

 retry:
	switch (code->op) {
	case FETCH_OP_TP_ARG:
		val = get_event_field(code, rec);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
455 
/* Return the length of string -- including null terminal byte */
/* Thin wrapper over the shared kernel-probe helper (user address) */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return kern_fetch_store_strlen_user(addr);
}
462 
/* Return the length of string -- including null terminal byte */
/* Thin wrapper over the shared kernel-probe helper (kernel address) */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	return kern_fetch_store_strlen(addr);
}
469 
/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return kern_fetch_store_string_user(addr, dest, base);
}
479 
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	return kern_fetch_store_string(addr, dest, base);
}
489 
/* Fault-safe read of @size bytes from a user address into @dest */
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	const void __user *uaddr =  (__force const void __user *)src;

	return copy_from_user_nofault(dest, uaddr, size);
}
497 
/*
 * Fault-safe read of @size bytes from @src. Where kernel and user
 * addresses do not overlap, low addresses are treated as user space.
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}
507 
/* eprobe handler */
/*
 * Record one eprobe event into the trace buffer, fetching its argument
 * values from the attached event's record @rec. Called from the
 * attached event's trigger (see eprobe_trigger_func()).
 */
static inline void
__eprobe_trace_func(struct eprobe_data *edata, void *rec)
{
	struct eprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != edata->file->event_call))
		return;

	if (trace_trigger_soft_disabled(edata->file))
		return;

	/* Extra space needed for dynamic data (strings) of this record */
	dsize = get_eprobe_size(&edata->ep->tp, rec);

	entry = trace_event_buffer_reserve(&fbuffer, edata->file,
					   sizeof(*entry) + edata->ep->tp.size + dsize);

	if (!entry)
		return;

	/* Re-read the data pointer of the reserved ring buffer event */
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
536 
537 /*
538  * The event probe implementation uses event triggers to get access to
539  * the event it is attached to, but is not an actual trigger. The below
540  * functions are just stubs to fulfill what is needed to use the trigger
541  * infrastructure.
542  */
/* Stub: eprobe triggers need no per-trigger initialization */
static int eprobe_trigger_init(struct event_trigger_data *data)
{
	return 0;
}
547 
/* Stub: the trigger and its edata are freed by disable_eprobe() */
static void eprobe_trigger_free(struct event_trigger_data *data)
{

}
552 
/* Stub: eprobe triggers are hidden from the trigger file listing */
static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}
559 
/* Fires when the attached event triggers: record the eprobe event */
static void eprobe_trigger_func(struct event_trigger_data *data,
				struct trace_buffer *buffer, void *rec,
				struct ring_buffer_event *rbe)
{
	struct eprobe_data *edata = data->private_data;

	__eprobe_trace_func(edata, rec);
}
568 
/* Trigger ops installed on the attached event for each eprobe */
static struct event_trigger_ops eprobe_trigger_ops = {
	.trigger		= eprobe_trigger_func,
	.print			= eprobe_trigger_print,
	.init			= eprobe_trigger_init,
	.free			= eprobe_trigger_free,
};
575 
576 static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
577 				    struct trace_event_file *file,
578 				    char *glob, char *cmd,
579 				    char *param_and_filter)
580 {
581 	return -1;
582 }
583 
584 static int eprobe_trigger_reg_func(char *glob,
585 				   struct event_trigger_data *data,
586 				   struct trace_event_file *file)
587 {
588 	return -1;
589 }
590 
/* Stub: eprobe triggers are removed internally (disable_eprobe()) */
static void eprobe_trigger_unreg_func(char *glob,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{

}
597 
/* All eprobe triggers share the same single ops instance */
static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
							char *param)
{
	return &eprobe_trigger_ops;
}
603 
/*
 * Command backing eprobe triggers. It is intentionally NOT registered
 * with register_event_command(); eprobes attach their triggers
 * directly, and the parse/reg callbacks are non-functional stubs.
 */
static struct event_command event_trigger_cmd = {
	.name			= "eprobe",
	.trigger_type		= ETT_EVENT_EPROBE,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= eprobe_trigger_cmd_parse,
	.reg			= eprobe_trigger_reg_func,
	.unreg			= eprobe_trigger_unreg_func,
	.unreg_all		= NULL,
	.get_trigger_ops	= eprobe_trigger_get_ops,
	.set_filter		= NULL,
};
615 
/*
 * Build the trigger that hooks this eprobe into the event it is
 * attached to. @file is the eprobe's own output file. If the eprobe
 * has a filter string it is compiled here. Returns the trigger or an
 * ERR_PTR; on failure nothing is left allocated.
 */
static struct event_trigger_data *
new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
{
	struct event_trigger_data *trigger;
	struct event_filter *filter = NULL;
	struct eprobe_data *edata;
	int ret;

	edata = kzalloc(sizeof(*edata), GFP_KERNEL);
	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
	if (!trigger || !edata) {
		ret = -ENOMEM;
		goto error;
	}

	trigger->flags = EVENT_TRIGGER_FL_PROBE;
	trigger->count = -1;	/* no count limit */
	trigger->ops = &eprobe_trigger_ops;

	/*
	 * EVENT PROBE triggers are not registered as commands with
	 * register_event_command(), as they are not controlled by the user
	 * from the trigger file
	 */
	trigger->cmd_ops = &event_trigger_cmd;

	INIT_LIST_HEAD(&trigger->list);

	if (ep->filter_str) {
		/*
		 * NOTE(review): compiled against file->event_call (the
		 * eprobe's own call), while parse-time validation used the
		 * attached event (trace_eprobe_parse_filter()) — verify
		 * these agree.
		 */
		ret = create_event_filter(file->tr, file->event_call,
					ep->filter_str, false, &filter);
		if (ret)
			goto error;
	}
	RCU_INIT_POINTER(trigger->filter, filter);

	edata->file = file;
	edata->ep = ep;
	trigger->private_data = edata;

	return trigger;
error:
	free_event_filter(filter);
	kfree(edata);
	kfree(trigger);
	return ERR_PTR(ret);
}
663 
/*
 * Enable one eprobe: create its trigger and attach it to the event
 * file of the system/event it probes in the same trace instance as
 * @eprobe_file. Returns -ENOENT if the attached event's file cannot
 * be found.
 */
static int enable_eprobe(struct trace_eprobe *ep,
			 struct trace_event_file *eprobe_file)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct trace_array *tr = eprobe_file->tr;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;
	trigger = new_eprobe_trigger(ep, eprobe_file);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	list_add_tail_rcu(&trigger->list, &file->triggers);

	/* Make sure the attached event is enabled and sees the trigger */
	trace_event_trigger_enable_disable(file, 1);
	update_cond_flag(file);

	return 0;
}
685 
/* Output callbacks for eprobe events */
static struct trace_event_functions eprobe_funcs = {
	.trace		= print_eprobe_event
};
689 
/*
 * Disable one eprobe: find its trigger on the attached event's file,
 * unlink it, and free the trigger, its filter and its eprobe_data
 * after waiting for all current readers to finish.
 */
static int disable_eprobe(struct trace_eprobe *ep,
			  struct trace_array *tr)
{
	struct event_trigger_data *trigger = NULL, *iter;
	struct trace_event_file *file;
	struct event_filter *filter;
	struct eprobe_data *edata;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;

	/* Locate the trigger belonging to this eprobe */
	list_for_each_entry(iter, &file->triggers, list) {
		if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
			continue;
		edata = iter->private_data;
		if (edata->ep == ep) {
			trigger = iter;
			break;
		}
	}
	if (!trigger)
		return -ENODEV;

	list_del_rcu(&trigger->list);

	trace_event_trigger_enable_disable(file, 0);
	update_cond_flag(file);

	/* Make sure nothing is using the edata or trigger */
	tracepoint_synchronize_unregister();

	filter = rcu_access_pointer(trigger->filter);

	if (filter)
		free_event_filter(filter);
	kfree(edata);
	kfree(trigger);

	return 0;
}
731 
732 static int enable_trace_eprobe(struct trace_event_call *call,
733 			       struct trace_event_file *file)
734 {
735 	struct trace_probe *pos, *tp;
736 	struct trace_eprobe *ep;
737 	bool enabled;
738 	int ret = 0;
739 
740 	tp = trace_probe_primary_from_call(call);
741 	if (WARN_ON_ONCE(!tp))
742 		return -ENODEV;
743 	enabled = trace_probe_is_enabled(tp);
744 
745 	/* This also changes "enabled" state */
746 	if (file) {
747 		ret = trace_probe_add_file(tp, file);
748 		if (ret)
749 			return ret;
750 	} else
751 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
752 
753 	if (enabled)
754 		return 0;
755 
756 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
757 		ep = container_of(pos, struct trace_eprobe, tp);
758 		ret = enable_eprobe(ep, file);
759 		if (ret)
760 			break;
761 		enabled = true;
762 	}
763 
764 	if (ret) {
765 		/* Failed to enable one of them. Roll back all */
766 		if (enabled)
767 			disable_eprobe(ep, file->tr);
768 		if (file)
769 			trace_probe_remove_file(tp, file);
770 		else
771 			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
772 	}
773 
774 	return ret;
775 }
776 
/*
 * Disable the eprobes sharing @call's trace_probe. The triggers are
 * only torn down when the last user (file or perf) goes away.
 */
static int disable_trace_eprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_eprobe *ep;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Other files still use this probe: keep it armed */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
			ep = container_of(pos, struct trace_eprobe, tp);
			disable_eprobe(ep, file->tr);
		}
	}

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
815 
/*
 * trace_event_class::reg callback for eprobe events. Only ftrace
 * register/unregister is supported; perf operations are accepted as
 * no-ops.
 */
static int eprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_eprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_eprobe(event, file);
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
838 
/* Wire up the eprobe's event call: flags, printer, fields and reg hook */
static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
{
	struct trace_event_call *call = trace_probe_event_call(&ep->tp);

	call->flags = TRACE_EVENT_FL_EPROBE;
	call->event.funcs = &eprobe_funcs;
	call->class->fields_array = eprobe_fields_array;
	call->class->reg = eprobe_register;
}
848 
849 static struct trace_event_call *
850 find_and_get_event(const char *system, const char *event_name)
851 {
852 	struct trace_event_call *tp_event;
853 	const char *name;
854 
855 	list_for_each_entry(tp_event, &ftrace_events, list) {
856 		/* Skip other probes and ftrace events */
857 		if (tp_event->flags &
858 		    (TRACE_EVENT_FL_IGNORE_ENABLE |
859 		     TRACE_EVENT_FL_KPROBE |
860 		     TRACE_EVENT_FL_UPROBE |
861 		     TRACE_EVENT_FL_EPROBE))
862 			continue;
863 		if (!tp_event->class->system ||
864 		    strcmp(system, tp_event->class->system))
865 			continue;
866 		name = trace_event_name(tp_event);
867 		if (!name || strcmp(event_name, name))
868 			continue;
869 		if (!trace_event_try_get_ref(tp_event)) {
870 			return NULL;
871 			break;
872 		}
873 		return tp_event;
874 		break;
875 	}
876 	return NULL;
877 }
878 
/*
 * Parse fetch argument @i (e.g. "name=$field:type") and resolve any
 * "$field" reference against the attached event's fields.
 */
static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
{
	unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT;
	int ret;

	ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags);
	if (ret)
		return ret;

	/* A $field reference must name a real field of the attached event */
	if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG) {
		ret = trace_eprobe_tp_arg_update(ep, i);
		if (ret)
			trace_probe_log_err(0, BAD_ATTACH_ARG);
	}

	/* Handle symbols "@" */
	if (!ret)
		ret = traceprobe_update_arg(&ep->tp.args[i]);

	return ret;
}
900 
/*
 * Rebuild the filter string from the argv[] words that followed "if",
 * store it in ep->filter_str, and validate it by test-compiling it
 * against the attached event. On any failure ep->filter_str is freed
 * and reset.
 */
static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
{
	struct event_filter *dummy;
	int i, ret, len = 0;
	char *p;

	if (argc == 0) {
		trace_probe_log_err(0, NO_EP_FILTER);
		return -EINVAL;
	}

	/* Recover the filter string */
	for (i = 0; i < argc; i++)
		len += strlen(argv[i]) + 1;

	ep->filter_str = kzalloc(len, GFP_KERNEL);
	if (!ep->filter_str)
		return -ENOMEM;

	/* Re-join the words with single spaces */
	p = ep->filter_str;
	for (i = 0; i < argc; i++) {
		ret = snprintf(p, len, "%s ", argv[i]);
		if (ret < 0)
			goto error;
		if (ret > len) {
			ret = -E2BIG;
			goto error;
		}
		p += ret;
		len -= ret;
	}
	/* Replace the trailing space with the terminator */
	p[-1] = '\0';

	/*
	 * Ensure the filter string can be parsed correctly. Note, this
	 * filter string is for the original event, not for the eprobe.
	 */
	ret = create_event_filter(top_trace_array(), ep->event, ep->filter_str,
				  true, &dummy);
	free_event_filter(dummy);
	if (ret)
		goto error;

	return 0;
error:
	kfree(ep->filter_str);
	ep->filter_str = NULL;
	return ret;
}
950 
/*
 * Create an eprobe from a parsed dynamic-event command. Returns
 * -ECANCELED if the command is not an eprobe command at all, so other
 * dyn_event parsers get a chance at it.
 */
static int __trace_eprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *      e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS] [if FILTER]
	 * Fetch args (no space):
	 *  <name>=$<field>[:TYPE]
	 */
	const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
	const char *sys_event = NULL, *sys_name = NULL;
	struct trace_event_call *event_call;
	struct trace_eprobe *ep = NULL;
	char buf1[MAX_EVENT_NAME_LEN];
	char buf2[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	int ret = 0, filter_idx = 0;
	int i, filter_cnt;

	if (argc < 2 || argv[0][0] != 'e')
		return -ECANCELED;

	trace_probe_log_init("event_probe", argc, argv);

	/* Optional ":[GRP/][ENAME]" after the leading 'e' names the new event */
	event = strchr(&argv[0][1], ':');
	if (event) {
		event++;
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	/* argv[1] is the SYSTEM.EVENT to attach to */
	trace_probe_log_set_index(1);
	sys_event = argv[1];
	ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
	if (ret || !sys_event || !sys_name) {
		trace_probe_log_err(0, NO_EVENT_INFO);
		goto parse_error;
	}

	/* Default the eprobe's name to the attached event's name */
	if (!event) {
		strscpy(buf1, sys_event, MAX_EVENT_NAME_LEN);
		event = buf1;
	}

	/* Split off a trailing "if FILTER" clause, if present */
	for (i = 2; i < argc; i++) {
		if (!strcmp(argv[i], "if")) {
			filter_idx = i + 1;
			filter_cnt = argc - filter_idx;
			argc = i;
			break;
		}
	}

	mutex_lock(&event_mutex);
	event_call = find_and_get_event(sys_name, sys_event);
	ep = alloc_event_probe(group, event, event_call, argc - 2);
	mutex_unlock(&event_mutex);

	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		if (ret == -ENODEV)
			trace_probe_log_err(0, BAD_ATTACH_EVENT);
		/* This must return -ENOMEM or missing event, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
		ep = NULL;
		goto error;
	}

	if (filter_idx) {
		trace_probe_log_set_index(filter_idx);
		ret = trace_eprobe_parse_filter(ep, filter_cnt, argv + filter_idx);
		if (ret)
			goto parse_error;
	} else
		ep->filter_str = NULL;

	argc -= 2; argv += 2;
	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = trace_eprobe_tp_update_arg(ep, argv, i);
		if (ret)
			goto error;
	}
	ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
	if (ret < 0)
		goto error;
	init_trace_eprobe_call(ep);
	mutex_lock(&event_mutex);
	ret = trace_probe_register_event_call(&ep->tp);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		}
		mutex_unlock(&event_mutex);
		goto error;
	}
	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
	mutex_unlock(&event_mutex);
	return ret;
parse_error:
	ret = -EINVAL;
error:
	/* Frees ep (and its event ref) on any failure path; NULL is a no-op */
	trace_event_probe_cleanup(ep);
	return ret;
}
1059 
/*
 * Register dynevent at core_initcall. This allows kernel to setup eprobe
 * events in postcore_initcall without tracefs.
 */
static __init int trace_events_eprobe_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&eprobe_dyn_event_ops);
	if (err)
		pr_warn("Could not register eprobe_dyn_event_ops\n");

	return err;
}
core_initcall(trace_events_eprobe_init_early);
1075