// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_synth - synthetic trace events
 *
 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>
#include "trace_probe.h"
#include "trace_probe_kernel.h"

#include "trace_synth.h"

#undef ERRORS
#define ERRORS	\
	C(BAD_NAME,		"Illegal name"),		\
	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
	C(EVENT_EXISTS,		"Event already exists"),	\
	C(TOO_MANY_FIELDS,	"Too many fields"),		\
	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
	C(INVALID_TYPE,		"Invalid type"),		\
	C(INVALID_FIELD,        "Invalid field"),		\
	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),

#undef C
#define C(a, b)		SYNTH_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };

static char *last_cmd;

static int errpos(const char *str)
{
	if (!str || !last_cmd)
		return 0;

	return err_pos(last_cmd, str);
}

static void last_cmd_set(const char *str)
{
	if (!str)
		return;

	kfree(last_cmd);

	last_cmd = kstrdup(str, GFP_KERNEL);
}

static void synth_err(u8 err_type, u16 err_pos)
{
	if (!last_cmd)
		return;

	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
			err_type, err_pos);
}

static int create_synth_event(const char *raw_command);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};

static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}

static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}

static bool synth_event_is_busy(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	return event->ref != 0;
}

static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct synth_event *sev = to_synth_event(ev);

	return strcmp(sev->name, event) == 0 &&
		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
}
struct synth_trace_event {
	struct trace_entry	ent;
	u64			fields[];
};

static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}

static bool synth_field_signed(char *type)
{
	if (str_has_prefix(type, "u"))
		return false;
	if (strcmp(type, "gfp_t") == 0)
		return false;

	return true;
}

static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}

static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;
	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	if (!end || end < start || type + strlen(type) > end + 1)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;

	if (len == 0)
		return 0; /* variable-length string */

	strncpy(buf, start, len);
	buf[len] = '\0';

	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}

static int synth_field_size(char *type)
{
	int size = 0;

	if (strcmp(type, "s64") == 0)
		size = sizeof(s64);
	else if (strcmp(type, "u64") == 0)
		size = sizeof(u64);
	else if (strcmp(type, "s32") == 0)
		size = sizeof(s32);
	else if (strcmp(type, "u32") == 0)
		size = sizeof(u32);
	else if (strcmp(type, "s16") == 0)
		size = sizeof(s16);
	else if (strcmp(type, "u16") == 0)
		size = sizeof(u16);
	else if (strcmp(type, "s8") == 0)
		size = sizeof(s8);
	else if (strcmp(type, "u8") == 0)
		size = sizeof(u8);
	else if (strcmp(type, "char") == 0)
		size = sizeof(char);
	else if (strcmp(type, "unsigned char") == 0)
		size = sizeof(unsigned char);
	else if (strcmp(type, "int") == 0)
		size = sizeof(int);
	else if (strcmp(type, "unsigned int") == 0)
		size = sizeof(unsigned int);
	else if (strcmp(type, "long") == 0)
		size = sizeof(long);
	else if (strcmp(type, "unsigned long") == 0)
		size = sizeof(unsigned long);
	else if (strcmp(type, "bool") == 0)
		size = sizeof(bool);
	else if (strcmp(type, "pid_t") == 0)
		size = sizeof(pid_t);
	else if (strcmp(type, "gfp_t") == 0)
		size = sizeof(gfp_t);
	else if (synth_field_is_string(type))
		size = synth_field_string_size(type);

	return size;
}

static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "bool") == 0)
		fmt = "%d";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (strcmp(type, "gfp_t") == 0)
		fmt = "%x";
	else if (synth_field_is_string(type))
		fmt = "%.*s";

	return fmt;
}

static void print_synth_event_num_val(struct trace_seq *s,
				      char *print_fmt, char *name,
				      int size, u64 val, char *space)
{
	switch (size) {
	case 1:
		trace_seq_printf(s, print_fmt, name, (u8)val, space);
		break;

	case 2:
		trace_seq_printf(s, print_fmt, name, (u16)val, space);
		break;

	case 4:
		trace_seq_printf(s, print_fmt, name, (u32)val, space);
		break;

	default:
		trace_seq_printf(s, print_fmt, name, val, space);
		break;
	}
}

static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			if (se->fields[i]->is_dynamic) {
				u32 offset, data_offset;
				char *str_field;

				offset = (u32)entry->fields[n_u64];
				data_offset = offset & 0xffff;

				str_field = (char *)entry + data_offset;

				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 str_field,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64++;
			} else {
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)&entry->fields[n_u64],
						 i == se->n_fields - 1 ? "" : " ");
				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
			}
		} else {
			struct trace_print_flags __flags[] = {
			    __def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");

			print_synth_event_num_val(s, print_fmt,
						  se->fields[i]->name,
						  se->fields[i]->size,
						  entry->fields[n_u64],
						  space);

			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
				trace_seq_puts(s, " (");
				trace_print_flags_seq(s, "|",
						      entry->fields[n_u64],
						      __flags);
				trace_seq_putc(s, ')');
			}
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};

static unsigned int trace_string(struct synth_trace_event *entry,
				 struct synth_event *event,
				 char *str_val,
				 bool is_dynamic,
				 unsigned int data_size,
				 unsigned int *n_u64)
{
	unsigned int len = 0;
	char *str_field;
	int ret;

	if (is_dynamic) {
		u32 data_offset;

		data_offset = offsetof(typeof(*entry), fields);
		data_offset += event->n_u64 * sizeof(u64);
		data_offset += data_size;

		len = kern_fetch_store_strlen((unsigned long)str_val);

		data_offset |= len << 16;
		*(u32 *)&entry->fields[*n_u64] = data_offset;

		ret = kern_fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);

		(*n_u64)++;
	} else {
		str_field = (char *)&entry->fields[*n_u64];

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)str_val < TASK_SIZE)
			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
		else
#endif
			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);

		if (ret < 0)
			strcpy(str_field, FAULT_STRING);

		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
	}

	return len;
}

static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	unsigned int i, n_u64, val_idx, len, data_size = 0;
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	for (i = 0; i < event->n_dynamic_fields; i++) {
		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
		char *str_val;

		val_idx = var_ref_idx[field_pos];
		str_val = (char *)(long)var_ref_vals[val_idx];

		len = kern_fetch_store_strlen((unsigned long)str_val);

		fields_size += len;
	}

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[val_idx];

			len = trace_string(entry, event, str_val,
					   event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			switch (field->size) {
			case 1:
				*(u8 *)&entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&entry->fields[n_u64] = (u32)val;
				break;

			default:
				entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}

static void free_synth_event_print_fmt(struct trace_event_call *call)
{
	if (call) {
		kfree(call->print_fmt);
		call->print_fmt = NULL;
	}
}

static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < event->n_fields; i++) {
		if (event->fields[i]->is_string &&
		    event->fields[i]->is_dynamic)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_str(%s)", event->fields[i]->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_synth_event_print_fmt(struct trace_event_call *call)
{
	struct synth_event *event = call->data;
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_synth_event_print_fmt(event, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_synth_event_print_fmt(event, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}

static int check_field_version(const char *prefix, const char *field_type,
			       const char *field_name)
{
	/*
	 * For backward compatibility, the old synthetic event command
	 * format did not require semicolons, and in order to not
	 * break user space, that old format must still work. If a new
	 * feature is added, then the format that uses the new feature
	 * will be required to have semicolons, as nothing that uses
	 * the old format would be using the new, yet to be created,
	 * feature. When a new feature is added, this will detect it,
	 * and return a number greater than 1, and require the format
	 * to use semicolons.
	 */
	return 1;
}

static struct synth_field *parse_synth_field(int argc, char **argv,
					     int *consumed, int *field_version)
{
	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
	struct synth_field *field;
	int len, ret = -ENOMEM;
	struct seq_buf s;
	ssize_t size;

	if (!strcmp(field_type, "unsigned")) {
		if (argc < 3) {
			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
			return ERR_PTR(-EINVAL);
		}
		prefix = "unsigned ";
		field_type = argv[1];
		field_name = argv[2];
		*consumed += 3;
	} else {
		field_name = argv[1];
		*consumed += 2;
	}

	if (!field_name) {
		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
		return ERR_PTR(-EINVAL);
	}

	*field_version = check_field_version(prefix, field_type, field_name);

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		return ERR_PTR(-ENOMEM);

	len = strlen(field_name);
	array = strchr(field_name, '[');
	if (array)
		len -= strlen(array);

	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
	if (!field->name)
		goto free;

	if (!is_good_name(field->name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
		ret = -EINVAL;
		goto free;
	}

	len = strlen(field_type) + 1;

	if (array)
		len += strlen(array);

	if (prefix)
		len += strlen(prefix);

	field->type = kzalloc(len, GFP_KERNEL);
	if (!field->type)
		goto free;

	seq_buf_init(&s, field->type, len);
	if (prefix)
		seq_buf_puts(&s, prefix);
	seq_buf_puts(&s, field_type);
	if (array)
		seq_buf_puts(&s, array);
	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
		goto free;

	s.buffer[s.len] = '\0';

	size = synth_field_size(field->type);
	if (size < 0) {
		if (array)
			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
		else
			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
		ret = -EINVAL;
		goto free;
	} else if (size == 0) {
		if (synth_field_is_string(field->type)) {
			char *type;

			len = sizeof("__data_loc ") + strlen(field->type) + 1;
			type = kzalloc(len, GFP_KERNEL);
			if (!type)
				goto free;

			seq_buf_init(&s, type, len);
			seq_buf_puts(&s, "__data_loc ");
			seq_buf_puts(&s, field->type);

			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
				goto free;
			s.buffer[s.len] = '\0';

			kfree(field->type);
			field->type = type;

			field->is_dynamic = true;
			size = sizeof(u64);
		} else {
			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
			ret = -EINVAL;
			goto free;
		}
	}
	field->size = size;

	if (synth_field_is_string(field->type))
		field->is_string = true;

	field->is_signed = synth_field_signed(field->type);
 out:
	return field;
 free:
	free_synth_field(field);
	field = ERR_PTR(ret);
	goto out;
}

static void free_synth_tracepoint(struct tracepoint *tp)
{
	if (!tp)
		return;

	kfree(tp->name);
	kfree(tp);
}

static struct tracepoint *alloc_synth_tracepoint(char *name)
{
	struct tracepoint *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	tp->name = kstrdup(name, GFP_KERNEL);
	if (!tp->name) {
		kfree(tp);
		return ERR_PTR(-ENOMEM);
	}

	return tp;
}

struct synth_event *find_synth_event(const char *name)
{
	struct dyn_event *pos;
	struct synth_event *event;

	for_each_dyn_event(pos) {
		if (!is_synth_event(pos))
			continue;
		event = to_synth_event(pos);
		if (strcmp(event->name, name) == 0)
			return event;
	}

	return NULL;
}

static struct trace_event_fields synth_event_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = synth_event_define_fields },
	{}
};

static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->fields_array = synth_event_fields_array;

	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	/* unregister_trace_event() will be called inside */
	if (ret < 0)
		trace_remove_event_call(call);
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}

static int unregister_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret;

	ret = trace_remove_event_call(call);

	return ret;
}

static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->dynamic_fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}

static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	unsigned int i, j, n_dynamic_fields = 0;
	struct synth_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	for (i = 0; i < n_fields; i++)
		if (fields[i]->is_dynamic)
			n_dynamic_fields++;

	if (n_dynamic_fields) {
		event->dynamic_fields = kcalloc(n_dynamic_fields,
						sizeof(*event->dynamic_fields),
						GFP_KERNEL);
		if (!event->dynamic_fields) {
			free_synth_event(event);
			event = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0, j = 0; i < n_fields; i++) {
		fields[i]->field_pos = i;
		event->fields[i] = fields[i];

		if (fields[i]->is_dynamic)
			event->dynamic_fields[j++] = fields[i];
	}
	event->n_dynamic_fields = j;
	event->n_fields = n_fields;
 out:
	return event;
}

static int synth_event_check_arg_fn(void *data)
{
	struct dynevent_arg_pair *arg_pair = data;
	int size;

	size = synth_field_size((char *)arg_pair->lhs);
	if (size == 0) {
		if (strstr((char *)arg_pair->lhs, "["))
			return 0;
	}

	return size ? 0 : -EINVAL;
}

/**
 * synth_event_add_field - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type: The type of the new field to add
 * @name: The name of the new field to add
 *
 * Add a new field to a synthetic event cmd object.  Field ordering is in
 * the same order the fields are added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
			  const char *name)
{
	struct dynevent_arg_pair arg_pair;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type || !name)
		return -EINVAL;

	dynevent_arg_pair_init(&arg_pair, 0, ';');

	arg_pair.lhs = type;
	arg_pair.rhs = name;

	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
	if (ret)
		return ret;

	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field);

/**
 * synth_event_add_field_str - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type_name: The type and name of the new field to add, as a single string
 *
 * Add a new field to a synthetic event cmd object, as a single
 * string.  The @type_name string is expected to be of the form 'type
 * name', to which a ';' will be appended.  No sanity checking is done -
 * what's passed in is assumed to already be well-formed.  Field
 * ordering is in the same order the fields are added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
{
	struct dynevent_arg arg;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type_name)
		return -EINVAL;

	dynevent_arg_init(&arg, ';');

	arg.str = type_name;

	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field_str);

/**
 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Add a new set of fields to a synthetic event cmd object.  The event
 * fields that will be defined for the event should be passed in as an
 * array of struct synth_field_desc, and the number of elements in the
 * array passed in as n_fields.  Field ordering will retain the
 * ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
			   struct synth_field_desc *fields,
			   unsigned int n_fields)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_fields);

/**
 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the synth_event_gen_cmd_start() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end().  This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * There should be an even number of variable args, each pair consisting
 * of a type followed by a field name.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
				struct module *mod, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, mod);
	for (;;) {
		const char *type, *name;

		type = va_arg(args, const char *);
		if (!type)
			break;
		name = va_arg(args, const char *);
		if (!name)
			break;

		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, type, name);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
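
/*
 * Example (an illustrative sketch only, not compiled as part of this
 * file): building a synthetic event with the dynevent_cmd interface
 * from module code, using the synth_event_gen_cmd_start() wrapper plus
 * the add_field helpers above.  The event and field names are made up;
 * the declarations come from <linux/trace_events.h>.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid");
 *	if (ret)
 *		goto free;
 *
 *	ret = synth_event_add_field(&cmd, "u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_add_field_str(&cmd, "char[16] next_comm");
 *	if (ret)
 *		goto free;
 *
 *	ret = synth_event_gen_cmd_end(&cmd);
 * free:
 *	kfree(buf);
 */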

/**
 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end().  This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * The event fields that will be defined for the event should be
 * passed in as an array of struct synth_field_desc, and the number of
 * elements in the array passed in as n_fields.  Field ordering will
 * retain the ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
				    struct module *mod,
				    struct synth_field_desc *fields,
				    unsigned int n_fields)
{
	struct dynevent_arg arg;
	unsigned int i;
	int ret = 0;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (n_fields > SYNTH_FIELDS_MAX)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL)
			return -EINVAL;

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);

static int __create_synth_event(const char *name, const char *raw_fields)
{
	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	int consumed, cmd_version = 1, n_fields_this_loop;
	int i, argc, n_fields = 0, ret = 0;
	struct synth_event *event = NULL;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	if (!is_good_name(name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
		return -EINVAL;
	}

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
		ret = -EEXIST;
		goto err;
	}

	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
	if (!tmp_fields) {
		ret = -ENOMEM;
		goto err;
	}

	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
		argv = argv_split(GFP_KERNEL, field_str, &argc);
		if (!argv) {
			ret = -ENOMEM;
			goto err;
		}

		if (!argc) {
			argv_free(argv);
			continue;
		}

		n_fields_this_loop = 0;
		consumed = 0;
		while (argc > consumed) {
			int field_version;

			field = parse_synth_field(argc - consumed,
						  argv + consumed, &consumed,
						  &field_version);
			if (IS_ERR(field)) {
				ret = PTR_ERR(field);
				goto err_free_arg;
			}

			/*
			 * Track the highest version of any field we
			 * found in the command.
			 */
			if (field_version > cmd_version)
				cmd_version = field_version;

			/*
			 * Now sort out what is and isn't valid for
			 * each supported version.
			 *
			 * If we see more than 1 field per loop, it
			 * means we have multiple fields between
			 * semicolons, and that's something we no
			 * longer support in a version 2 or greater
			 * command.
			 */
			if (cmd_version > 1 && n_fields_this_loop >= 1) {
				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
				ret = -EINVAL;
				goto err_free_arg;
			}

			if (n_fields == SYNTH_FIELDS_MAX) {
				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
				ret = -EINVAL;
				goto err_free_arg;
			}
			fields[n_fields++] = field;

			n_fields_this_loop++;
		}
		argv_free(argv);

		if (consumed < argc) {
			synth_err(SYNTH_ERR_INVALID_CMD, 0);
			ret = -EINVAL;
			goto err;
		}

	}

	if (n_fields == 0) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto err;
	}

	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent, &event->call);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	kfree(saved_fields);

	return ret;
 err_free_arg:
	argv_free(argv);
 err:
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}

/**
 * synth_event_create - Create a new synthetic event
 * @name: The name of the new synthetic event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 * @mod: The module creating the event, NULL if not created from a module
 *
 * Create a new synthetic event with the given name under the
 * trace/events/synthetic/ directory.  The event fields that will be
 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
 * n_fields. Field ordering will retain the ordering given in the
 * fields array.
 *
 * If the new synthetic event is being created from a module, the mod
 * param must be non-NULL.  This will ensure that the trace buffer
 * won't contain unreadable events.
 *
 * The new synth event should be deleted using the synth_event_delete()
 * function.  The new synthetic event can be generated from modules or
 * other kernel code using synth_event_trace() and related functions.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
		       unsigned int n_fields, struct module *mod)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
					      fields, n_fields);
	if (ret)
		goto out;

	ret = synth_event_gen_cmd_end(&cmd);
 out:
	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_create);
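
/*
 * Example (an illustrative sketch only, not compiled as part of this
 * file): creating a synthetic event in one call from module code using
 * an array of field descriptions, and removing it again later.  The
 * event and field names are made up.
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "char[16]",	.name = "comm" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 *	...
 *	ret = synth_event_delete("wakeup_lat");
 */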

static int destroy_synth_event(struct synth_event *se)
{
	int ret;

	if (se->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&se->call))
		return -EBUSY;

	ret = unregister_synth_event(se);
	if (!ret) {
		dyn_event_remove(&se->devent);
		free_synth_event(se);
	}

	return ret;
}

/**
 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the new synthetic event
 *
 * Delete a synthetic event that was created with synth_event_create().
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	if (mod) {
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);

static int check_command(const char *raw_command)
{
	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
	int argc, ret = 0;

	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	name_and_field = strsep(&cmd, ";");
	if (!name_and_field) {
		ret = -EINVAL;
		goto free;
	}

	if (name_and_field[0] == '!')
		goto free;

	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
	if (!argv) {
		ret = -ENOMEM;
		goto free;
	}
	argv_free(argv);

	if (argc < 3)
		ret = -EINVAL;
free:
	kfree(saved_cmd);

	return ret;
}

static int create_or_delete_synth_event(const char *raw_command)
{
	char *name = NULL, *fields, *p;
	int ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	ret = check_command(raw_command);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	p = strpbrk(raw_command, " \t");
	if (!p && raw_command[0] != '!') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto free;
	}

	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	if (name[0] == '!') {
		ret = synth_event_delete(name + 1);
		goto free;
	}

	fields = skip_spaces(p);

	ret = __create_synth_event(name, fields);
free:
	kfree(name);

	return ret;
}

static int synth_event_run_command(struct dynevent_cmd *cmd)
{
	struct synth_event *se;
	int ret;

	ret = create_or_delete_synth_event(cmd->seq.buffer);
	if (ret)
		return ret;

	se = find_synth_event(cmd->event_name);
	if (WARN_ON(!se))
		return -ENOENT;

	se->mod = cmd->private_data;

	return ret;
}

/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object.  Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);

static inline int
__synth_event_trace_init(struct trace_event_file *file,
			 struct synth_event_trace_state *trace_state)
{
	int ret = 0;

	memset(trace_state, '\0', sizeof(*trace_state));

	/*
	 * Normal event tracing doesn't get called at all unless the
	 * ENABLED bit is set (which attaches the probe thus allowing
	 * this code to be called, etc).  Because this is called
	 * directly by the user, we don't have that but we still need
	 * to honor not logging when disabled.  For the iterated
	 * trace case, we save the enabled state upon start and just
	 * ignore the following data calls.
	 */
	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file)) {
		trace_state->disabled = true;
		ret = -ENOENT;
		goto out;
	}

	trace_state->event = file->event_call->data;
out:
	return ret;
}

static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state,
			  int dynamic_fields_size)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	fields_size = trace_state->event->n_u64 * sizeof(u64);
	fields_size += dynamic_fields_size;

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}

	return ret;
}

static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}

/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @args: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list of 'n_vals' u64 values.  The number
 * of vals must match the number of fields in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	unsigned int i, n_u64, len, data_size = 0;
	struct synth_event_trace_state state;
	va_list args;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		va_start(args, n_vals);

		for (i = 0; i < state.event->n_fields; i++) {
			u64 val = va_arg(args, u64);

			if (state.event->fields[i]->is_string &&
			    state.event->fields[i]->is_dynamic) {
				char *str_val = (char *)(long)val;

				data_size += strlen(str_val) + 1;
			}
		}

		va_end(args);
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)val;

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];

			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);
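
/*
 * Example (an illustrative sketch only, not compiled as part of this
 * file): generating a trace of the "wakeup_lat" event sketched in the
 * synth_event_create() example above.  It assumes @file was obtained
 * via trace_get_event_file() for the "synthetic" system, and that pid
 * and lat_ns hold the values to record; every value, including the
 * string pointer for the char[16] field, is cast to u64.
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)pid,
 *				(u64)lat_ns,
 *				(u64)"cat");
 */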

/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64.  The number of
 * vals must match the number of fields in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	unsigned int i, n_u64, field_pos, len, data_size = 0;
	struct synth_event_trace_state state;
	char *str_val;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		for (i = 0; i < state.event->n_dynamic_fields; i++) {
			field_pos = state.event->dynamic_fields[i]->field_pos;
			str_val = (char *)(long)vals[field_pos];
			len = strlen(str_val) + 1;
			data_size += len;
		}
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)vals[i];

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];
			u64 val = vals[i];

			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_array);
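
/*
 * Example (an illustrative sketch only, not compiled as part of this
 * file): the same trace as the synth_event_trace() example above, but
 * with the values passed in as an array in field order.  String values
 * are again pointers cast to u64.
 *
 *	u64 vals[] = { (u64)pid, (u64)lat_ns, (u64)"cat" };
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */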
1817 
1818 /**
1819  * synth_event_trace_start - Start piecewise synthetic event trace
1820  * @file: The trace_event_file representing the synthetic event
1821  * @trace_state: A pointer to object tracking the piecewise trace state
1822  *
1823  * Start the trace of a synthetic event field-by-field rather than all
1824  * at once.
1825  *
1826  * This function 'opens' an event trace, which means space is reserved
1827  * for the event in the trace buffer, after which the event's
1828  * individual field values can be set through either
1829  * synth_event_add_next_val() or synth_event_add_val().
1830  *
1831  * A pointer to a trace_state object is passed in, which will keep
1832  * track of the current event trace state until the event trace is
1833  * closed (and the event finally traced) using
1834  * synth_event_trace_end().
1835  *
1836  * Note that synth_event_trace_end() must be called after all values
1837  * have been added for each event trace, regardless of whether adding
1838  * all field values succeeded or not.
1839  *
1840  * Note also that for a given event trace, all fields must be added
1841  * using either synth_event_add_next_val() or synth_event_add_val()
1842  * but not both together or interleaved.
1843  *
1844  * Return: 0 on success, err otherwise.
1845  */
1846 int synth_event_trace_start(struct trace_event_file *file,
1847 			    struct synth_event_trace_state *trace_state)
1848 {
1849 	int ret;
1850 
1851 	if (!trace_state)
1852 		return -EINVAL;
1853 
1854 	ret = __synth_event_trace_init(file, trace_state);
1855 	if (ret) {
1856 		if (ret == -ENOENT)
1857 			ret = 0; /* just disabled, not really an error */
1858 		return ret;
1859 	}
1860 
1861 	if (trace_state->event->n_dynamic_fields)
1862 		return -ENOTSUPP;
1863 
1864 	ret = __synth_event_trace_start(file, trace_state, 0);
1865 
1866 	return ret;
1867 }
1868 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1869 
1870 static int __synth_event_add_val(const char *field_name, u64 val,
1871 				 struct synth_event_trace_state *trace_state)
1872 {
1873 	struct synth_field *field = NULL;
1874 	struct synth_trace_event *entry;
1875 	struct synth_event *event;
1876 	int i, ret = 0;
1877 
1878 	if (!trace_state) {
1879 		ret = -EINVAL;
1880 		goto out;
1881 	}
1882 
1883 	/* can't mix add_next_synth_val() with add_synth_val() */
1884 	if (field_name) {
1885 		if (trace_state->add_next) {
1886 			ret = -EINVAL;
1887 			goto out;
1888 		}
1889 		trace_state->add_name = true;
1890 	} else {
1891 		if (trace_state->add_name) {
1892 			ret = -EINVAL;
1893 			goto out;
1894 		}
1895 		trace_state->add_next = true;
1896 	}
1897 
1898 	if (trace_state->disabled)
1899 		goto out;
1900 
1901 	event = trace_state->event;
1902 	if (trace_state->add_name) {
1903 		for (i = 0; i < event->n_fields; i++) {
1904 			field = event->fields[i];
1905 			if (strcmp(field->name, field_name) == 0)
1906 				break;
1907 		}
1908 		if (!field) {
1909 			ret = -EINVAL;
1910 			goto out;
1911 		}
1912 	} else {
1913 		if (trace_state->cur_field >= event->n_fields) {
1914 			ret = -EINVAL;
1915 			goto out;
1916 		}
1917 		field = event->fields[trace_state->cur_field++];
1918 	}
1919 
1920 	entry = trace_state->entry;
1921 	if (field->is_string) {
1922 		char *str_val = (char *)(long)val;
1923 		char *str_field;
1924 
1925 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
1926 			ret = -EINVAL;
1927 			goto out;
1928 		}
1929 
1930 		if (!str_val) {
1931 			ret = -EINVAL;
1932 			goto out;
1933 		}
1934 
1935 		str_field = (char *)&entry->fields[field->offset];
1936 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
1937 	} else {
1938 		switch (field->size) {
1939 		case 1:
1940 			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
1941 			break;
1942 
1943 		case 2:
1944 			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
1945 			break;
1946 
1947 		case 4:
1948 			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
1949 			break;
1950 
1951 		default:
1952 			trace_state->entry->fields[field->offset] = val;
1953 			break;
1954 		}
1955 	}
1956  out:
1957 	return ret;
1958 }
1959 
1960 /**
1961  * synth_event_add_next_val - Add the next field's value to an open synth trace
1962  * @val: The value to set the next field to
1963  * @trace_state: A pointer to object tracking the piecewise trace state
1964  *
1965  * Set the value of the next field in an event that's been opened by
1966  * synth_event_trace_start().
1967  *
1968  * The val param should be the value cast to u64.  If the value points
1969  * to a string, the val param should be a char * cast to u64.
1970  *
1971  * This function assumes all the fields in an event are to be set one
1972  * after another - successive calls to this function are made, one for
1973  * each field, in the order of the fields in the event, until all
1974  * fields have been set.  If you'd rather set each field individually
1975  * without regard to ordering, synth_event_add_val() can be used
1976  * instead.
1977  *
1978  * Note however that synth_event_add_next_val() and
1979  * synth_event_add_val() can't be intermixed for a given event trace -
1980  * one or the other but not both can be used at the same time.
1981  *
1982  * Note also that synth_event_trace_end() must be called after all
1983  * values have been added for each event trace, regardless of whether
1984  * adding all field values succeeded or not.
1985  *
1986  * Return: 0 on success, err otherwise.
1987  */
1988 int synth_event_add_next_val(u64 val,
1989 			     struct synth_event_trace_state *trace_state)
1990 {
1991 	return __synth_event_add_val(NULL, val, trace_state);
1992 }
1993 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
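
/*
 * Example (an illustrative sketch, not part of this file's API): tracing
 * a hypothetical two-field "wakeup_latency" synthetic event piecewise,
 * setting the fields in definition order.  The trace_event_file is
 * assumed to have been looked up beforehand, e.g. with
 * trace_get_event_file().  Note that synth_event_trace_end() still runs
 * if adding a value fails.
 *
 *	static int wakeup_latency_trace_next(struct trace_event_file *file,
 *					     u64 lat, pid_t pid)
 *	{
 *		struct synth_event_trace_state state;
 *		int ret;
 *
 *		ret = synth_event_trace_start(file, &state);
 *		if (ret)
 *			return ret;
 *
 *		ret = synth_event_add_next_val(lat, &state);
 *		if (!ret)
 *			ret = synth_event_add_next_val((u64)pid, &state);
 *
 *		synth_event_trace_end(&state);
 *
 *		return ret;
 *	}
 */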
1994 
1995 /**
1996  * synth_event_add_val - Add a named field's value to an open synth trace
1997  * @field_name: The name of the synthetic event field value to set
1998  * @val: The value to set the named field to
1999  * @trace_state: A pointer to object tracking the piecewise trace state
2000  *
2001  * Set the value of the named field in an event that's been opened by
2002  * synth_event_trace_start().
2003  *
2004  * The val param should be the value cast to u64.  If the value points
2005  * to a string, the val param should be a char * cast to u64.
2006  *
2007  * This function looks up the field name, and if found, sets the field
2008  * to the specified value.  This lookup makes this function more
2009  * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
2011  * important.
2012  *
2013  * Note however that synth_event_add_next_val() and
2014  * synth_event_add_val() can't be intermixed for a given event trace -
2015  * one or the other but not both can be used at the same time.
2016  *
2017  * Note also that synth_event_trace_end() must be called after all
2018  * values have been added for each event trace, regardless of whether
2019  * adding all field values succeeded or not.
2020  *
2021  * Return: 0 on success, err otherwise.
2022  */
2023 int synth_event_add_val(const char *field_name, u64 val,
2024 			struct synth_event_trace_state *trace_state)
2025 {
2026 	return __synth_event_add_val(field_name, val, trace_state);
2027 }
2028 EXPORT_SYMBOL_GPL(synth_event_add_val);
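
/*
 * Example (an illustrative sketch, same hypothetical event as above) of
 * tracing by field name.  Field ordering doesn't matter here, but each
 * call pays for a linear field-name lookup, so the
 * synth_event_add_next_val() form is cheaper when the order is known.
 *
 *	static int wakeup_latency_trace_named(struct trace_event_file *file,
 *					      u64 lat, pid_t pid)
 *	{
 *		struct synth_event_trace_state state;
 *		int ret;
 *
 *		ret = synth_event_trace_start(file, &state);
 *		if (ret)
 *			return ret;
 *
 *		ret = synth_event_add_val("pid", (u64)pid, &state);
 *		if (!ret)
 *			ret = synth_event_add_val("lat", lat, &state);
 *
 *		synth_event_trace_end(&state);
 *
 *		return ret;
 *	}
 */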
2029 
2030 /**
2031  * synth_event_trace_end - End piecewise synthetic event trace
2032  * @trace_state: A pointer to object tracking the piecewise trace state
2033  *
2034  * End the trace of a synthetic event opened by
 * synth_event_trace_start().
2036  *
 * This function 'closes' an event trace, which means it commits the
 * reserved event and cleans up other loose ends.
2039  *
2040  * A pointer to a trace_state object is passed in, which will keep
2041  * track of the current event trace state opened with
2042  * synth_event_trace_start().
2043  *
2044  * Note that this function must be called after all values have been
2045  * added for each event trace, regardless of whether adding all field
2046  * values succeeded or not.
2047  *
2048  * Return: 0 on success, err otherwise.
2049  */
2050 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2051 {
2052 	if (!trace_state)
2053 		return -EINVAL;
2054 
2055 	__synth_event_trace_end(trace_state);
2056 
2057 	return 0;
2058 }
2059 EXPORT_SYMBOL_GPL(synth_event_trace_end);
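
/*
 * Example (an illustrative sketch): statically-sized string fields, such
 * as a hypothetical "char comm[16]", are passed as a char * cast to u64.
 * Dynamic string fields can't be set this way; __synth_event_add_val()
 * rejects them with -EINVAL.
 *
 *	static int wakeup_latency_trace_comm(struct trace_event_file *file,
 *					     u64 lat)
 *	{
 *		struct synth_event_trace_state state;
 *		int ret;
 *
 *		ret = synth_event_trace_start(file, &state);
 *		if (ret)
 *			return ret;
 *
 *		ret = synth_event_add_val("lat", lat, &state);
 *		if (!ret)
 *			ret = synth_event_add_val("comm",
 *						  (u64)(long)current->comm,
 *						  &state);
 *
 *		synth_event_trace_end(&state);
 *
 *		return ret;
 *	}
 */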
2060 
2061 static int create_synth_event(const char *raw_command)
2062 {
2063 	char *fields, *p;
2064 	const char *name;
2065 	int len, ret = 0;
2066 
2067 	raw_command = skip_spaces(raw_command);
2068 	if (raw_command[0] == '\0')
2069 		return ret;
2070 
2071 	last_cmd_set(raw_command);
2072 
2073 	name = raw_command;
2074 
2075 	/* Don't try to process if not our system */
2076 	if (name[0] != 's' || name[1] != ':')
2077 		return -ECANCELED;
2078 	name += 2;
2079 
2080 	p = strpbrk(raw_command, " \t");
2081 	if (!p) {
2082 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2083 		return -EINVAL;
2084 	}
2085 
2086 	fields = skip_spaces(p);
2087 
	/* This interface accepts a group name prefix */
2089 	if (strchr(name, '/')) {
2090 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2091 		if (len == 0) {
2092 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2093 			return -EINVAL;
2094 		}
2095 		name += len;
2096 	}
2097 
2098 	len = name - raw_command;
2099 
2100 	ret = check_command(raw_command + len);
2101 	if (ret) {
2102 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2103 		return ret;
2104 	}
2105 
2106 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2107 	if (!name)
2108 		return -ENOMEM;
2109 
2110 	ret = __create_synth_event(name, fields);
2111 
2112 	kfree(name);
2113 
2114 	return ret;
2115 }
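
/*
 * Illustrative command strings accepted by create_synth_event() via the
 * dynamic_events interface (the event name and fields are hypothetical):
 *
 *	s:wakeup_latency u64 lat; pid_t pid; char comm[16]
 *	s:synthetic/wakeup_latency u64 lat; pid_t pid; char comm[16]
 *
 * Commands that don't start with "s:" are not ours and return -ECANCELED
 * so another dynamic event parser can claim them; a command with no
 * field list is rejected with SYNTH_ERR_INVALID_CMD.
 */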
2116 
2117 static int synth_event_release(struct dyn_event *ev)
2118 {
2119 	struct synth_event *event = to_synth_event(ev);
2120 	int ret;
2121 
2122 	if (event->ref)
2123 		return -EBUSY;
2124 
2125 	if (trace_event_dyn_busy(&event->call))
2126 		return -EBUSY;
2127 
2128 	ret = unregister_synth_event(event);
2129 	if (ret)
2130 		return ret;
2131 
2132 	dyn_event_remove(ev);
2133 	free_synth_event(event);
2134 	return 0;
2135 }
2136 
2137 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2138 {
2139 	struct synth_field *field;
2140 	unsigned int i;
2141 	char *type, *t;
2142 
2143 	seq_printf(m, "%s\t", event->name);
2144 
2145 	for (i = 0; i < event->n_fields; i++) {
2146 		field = event->fields[i];
2147 
2148 		type = field->type;
2149 		t = strstr(type, "__data_loc");
2150 		if (t) { /* __data_loc belongs in format but not event desc */
2151 			t += sizeof("__data_loc");
2152 			type = t;
2153 		}
2154 
		/* print each field as "type name", semicolon-separated */
2156 		seq_printf(m, "%s %s%s", type, field->name,
2157 			   i == event->n_fields - 1 ? "" : "; ");
2158 	}
2159 
2160 	seq_putc(m, '\n');
2161 
2162 	return 0;
2163 }
2164 
2165 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2166 {
2167 	struct synth_event *event = to_synth_event(ev);
2168 
2169 	seq_printf(m, "s:%s/", event->class.system);
2170 
2171 	return __synth_event_show(m, event);
2172 }
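
/*
 * For a hypothetical "wakeup_latency" event defined with "u64 lat; pid_t
 * pid", this prints a line of the form:
 *
 *	s:synthetic/wakeup_latency	u64 lat; pid_t pid
 *
 * synth_events_seq_show() below emits the same line without the
 * "s:<system>/" prefix.
 */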
2173 
2174 static int synth_events_seq_show(struct seq_file *m, void *v)
2175 {
2176 	struct dyn_event *ev = v;
2177 
2178 	if (!is_synth_event(ev))
2179 		return 0;
2180 
2181 	return __synth_event_show(m, to_synth_event(ev));
2182 }
2183 
2184 static const struct seq_operations synth_events_seq_op = {
2185 	.start	= dyn_event_seq_start,
2186 	.next	= dyn_event_seq_next,
2187 	.stop	= dyn_event_seq_stop,
2188 	.show	= synth_events_seq_show,
2189 };
2190 
2191 static int synth_events_open(struct inode *inode, struct file *file)
2192 {
2193 	int ret;
2194 
2195 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2196 	if (ret)
2197 		return ret;
2198 
2199 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2200 		ret = dyn_events_release_all(&synth_event_ops);
2201 		if (ret < 0)
2202 			return ret;
2203 	}
2204 
2205 	return seq_open(file, &synth_events_seq_op);
2206 }
2207 
2208 static ssize_t synth_events_write(struct file *file,
2209 				  const char __user *buffer,
2210 				  size_t count, loff_t *ppos)
2211 {
2212 	return trace_parse_run_command(file, buffer, count, ppos,
2213 				       create_or_delete_synth_event);
2214 }
2215 
2216 static const struct file_operations synth_events_fops = {
2217 	.open           = synth_events_open,
2218 	.write		= synth_events_write,
2219 	.read           = seq_read,
2220 	.llseek         = seq_lseek,
2221 	.release        = seq_release,
2222 };
2223 
/*
 * Register the dyn_event ops at core_initcall. This allows the kernel to
 * set up synthetic events from postcore_initcall onwards, before tracefs
 * is ready.
 */
2228 static __init int trace_events_synth_init_early(void)
2229 {
2230 	int err = 0;
2231 
2232 	err = dyn_event_register(&synth_event_ops);
2233 	if (err)
2234 		pr_warn("Could not register synth_event_ops\n");
2235 
2236 	return err;
2237 }
2238 core_initcall(trace_events_synth_init_early);
2239 
2240 static __init int trace_events_synth_init(void)
2241 {
	struct dentry *entry = NULL;
	int err;

	err = tracing_init_dentry();
2245 	if (err)
2246 		goto err;
2247 
2248 	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2249 				    NULL, NULL, &synth_events_fops);
2250 	if (!entry) {
2251 		err = -ENODEV;
2252 		goto err;
2253 	}
2254 
2255 	return err;
2256  err:
2257 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2258 
2259 	return err;
2260 }
2261 
2262 fs_initcall(trace_events_synth_init);
2263