1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
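
/*
 * Note: ERRORS is expanded twice with different definitions of C() (an
 * "X macro" pattern): once to generate the SYNTH_ERR_* enum values used
 * by synth_err(), and once more to generate the matching err_text[]
 * strings handed to tracing_log_err().
 */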
46 
47 static char *last_cmd;
48 
49 static int errpos(const char *str)
50 {
51 	if (!str || !last_cmd)
52 		return 0;
53 
54 	return err_pos(last_cmd, str);
55 }
56 
57 static void last_cmd_set(const char *str)
58 {
59 	if (!str)
60 		return;
61 
62 	kfree(last_cmd);
63 
64 	last_cmd = kstrdup(str, GFP_KERNEL);
65 }
66 
67 static void synth_err(u8 err_type, u16 err_pos)
68 {
69 	if (!last_cmd)
70 		return;
71 
72 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
73 			err_type, err_pos);
74 }
75 
76 static int create_synth_event(const char *raw_command);
77 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
78 static int synth_event_release(struct dyn_event *ev);
79 static bool synth_event_is_busy(struct dyn_event *ev);
80 static bool synth_event_match(const char *system, const char *event,
81 			int argc, const char **argv, struct dyn_event *ev);
82 
83 static struct dyn_event_operations synth_event_ops = {
84 	.create = create_synth_event,
85 	.show = synth_event_show,
86 	.is_busy = synth_event_is_busy,
87 	.free = synth_event_release,
88 	.match = synth_event_match,
89 };
90 
91 static bool is_synth_event(struct dyn_event *ev)
92 {
93 	return ev->ops == &synth_event_ops;
94 }
95 
96 static struct synth_event *to_synth_event(struct dyn_event *ev)
97 {
98 	return container_of(ev, struct synth_event, devent);
99 }
100 
101 static bool synth_event_is_busy(struct dyn_event *ev)
102 {
103 	struct synth_event *event = to_synth_event(ev);
104 
105 	return event->ref != 0;
106 }
107 
108 static bool synth_event_match(const char *system, const char *event,
109 			int argc, const char **argv, struct dyn_event *ev)
110 {
111 	struct synth_event *sev = to_synth_event(ev);
112 
113 	return strcmp(sev->name, event) == 0 &&
114 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
115 }
116 
117 struct synth_trace_event {
118 	struct trace_entry	ent;
119 	u64			fields[];
120 };
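
/*
 * Layout note: synthetic fields are packed into fields[] in u64-sized
 * slots.  Numeric fields use one slot, statically-sized strings use
 * STR_VAR_LEN_MAX bytes worth of slots, and dynamic strings use one
 * slot holding a packed u32 data_loc value (length in the upper 16
 * bits, offset of the string data from the start of the entry in the
 * lower 16 bits), with the string bytes themselves appended after the
 * last slot.
 */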
121 
122 static int synth_event_define_fields(struct trace_event_call *call)
123 {
124 	struct synth_trace_event trace;
125 	int offset = offsetof(typeof(trace), fields);
126 	struct synth_event *event = call->data;
127 	unsigned int i, size, n_u64;
128 	char *name, *type;
129 	bool is_signed;
130 	int ret = 0;
131 
132 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
133 		size = event->fields[i]->size;
134 		is_signed = event->fields[i]->is_signed;
135 		type = event->fields[i]->type;
136 		name = event->fields[i]->name;
137 		ret = trace_define_field(call, type, name, offset, size,
138 					 is_signed, FILTER_OTHER);
139 		if (ret)
140 			break;
141 
142 		event->fields[i]->offset = n_u64;
143 
144 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
145 			offset += STR_VAR_LEN_MAX;
146 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
147 		} else {
148 			offset += sizeof(u64);
149 			n_u64++;
150 		}
151 	}
152 
153 	event->n_u64 = n_u64;
154 
155 	return ret;
156 }
157 
158 static bool synth_field_signed(char *type)
159 {
160 	if (str_has_prefix(type, "u"))
161 		return false;
162 	if (strcmp(type, "gfp_t") == 0)
163 		return false;
164 
165 	return true;
166 }
167 
168 static int synth_field_is_string(char *type)
169 {
170 	if (strstr(type, "char[") != NULL)
171 		return true;
172 
173 	return false;
174 }
175 
176 static int synth_field_string_size(char *type)
177 {
178 	char buf[4], *end, *start;
179 	unsigned int len;
180 	int size, err;
181 
182 	start = strstr(type, "char[");
183 	if (start == NULL)
184 		return -EINVAL;
185 	start += sizeof("char[") - 1;
186 
187 	end = strchr(type, ']');
188 	if (!end || end < start || type + strlen(type) > end + 1)
189 		return -EINVAL;
190 
191 	len = end - start;
192 	if (len > 3)
193 		return -EINVAL;
194 
195 	if (len == 0)
196 		return 0; /* variable-length string */
197 
198 	strncpy(buf, start, len);
199 	buf[len] = '\0';
200 
201 	err = kstrtouint(buf, 0, &size);
202 	if (err)
203 		return err;
204 
205 	if (size > STR_VAR_LEN_MAX)
206 		return -EINVAL;
207 
208 	return size;
209 }
210 
211 static int synth_field_size(char *type)
212 {
213 	int size = 0;
214 
215 	if (strcmp(type, "s64") == 0)
216 		size = sizeof(s64);
217 	else if (strcmp(type, "u64") == 0)
218 		size = sizeof(u64);
219 	else if (strcmp(type, "s32") == 0)
220 		size = sizeof(s32);
221 	else if (strcmp(type, "u32") == 0)
222 		size = sizeof(u32);
223 	else if (strcmp(type, "s16") == 0)
224 		size = sizeof(s16);
225 	else if (strcmp(type, "u16") == 0)
226 		size = sizeof(u16);
227 	else if (strcmp(type, "s8") == 0)
228 		size = sizeof(s8);
229 	else if (strcmp(type, "u8") == 0)
230 		size = sizeof(u8);
231 	else if (strcmp(type, "char") == 0)
232 		size = sizeof(char);
233 	else if (strcmp(type, "unsigned char") == 0)
234 		size = sizeof(unsigned char);
235 	else if (strcmp(type, "int") == 0)
236 		size = sizeof(int);
237 	else if (strcmp(type, "unsigned int") == 0)
238 		size = sizeof(unsigned int);
239 	else if (strcmp(type, "long") == 0)
240 		size = sizeof(long);
241 	else if (strcmp(type, "unsigned long") == 0)
242 		size = sizeof(unsigned long);
243 	else if (strcmp(type, "bool") == 0)
244 		size = sizeof(bool);
245 	else if (strcmp(type, "pid_t") == 0)
246 		size = sizeof(pid_t);
247 	else if (strcmp(type, "gfp_t") == 0)
248 		size = sizeof(gfp_t);
249 	else if (synth_field_is_string(type))
250 		size = synth_field_string_size(type);
251 
252 	return size;
253 }
254 
255 static const char *synth_field_fmt(char *type)
256 {
257 	const char *fmt = "%llu";
258 
259 	if (strcmp(type, "s64") == 0)
260 		fmt = "%lld";
261 	else if (strcmp(type, "u64") == 0)
262 		fmt = "%llu";
263 	else if (strcmp(type, "s32") == 0)
264 		fmt = "%d";
265 	else if (strcmp(type, "u32") == 0)
266 		fmt = "%u";
267 	else if (strcmp(type, "s16") == 0)
268 		fmt = "%d";
269 	else if (strcmp(type, "u16") == 0)
270 		fmt = "%u";
271 	else if (strcmp(type, "s8") == 0)
272 		fmt = "%d";
273 	else if (strcmp(type, "u8") == 0)
274 		fmt = "%u";
275 	else if (strcmp(type, "char") == 0)
276 		fmt = "%d";
277 	else if (strcmp(type, "unsigned char") == 0)
278 		fmt = "%u";
279 	else if (strcmp(type, "int") == 0)
280 		fmt = "%d";
281 	else if (strcmp(type, "unsigned int") == 0)
282 		fmt = "%u";
283 	else if (strcmp(type, "long") == 0)
284 		fmt = "%ld";
285 	else if (strcmp(type, "unsigned long") == 0)
286 		fmt = "%lu";
287 	else if (strcmp(type, "bool") == 0)
288 		fmt = "%d";
289 	else if (strcmp(type, "pid_t") == 0)
290 		fmt = "%d";
291 	else if (strcmp(type, "gfp_t") == 0)
292 		fmt = "%x";
293 	else if (synth_field_is_string(type))
294 		fmt = "%.*s";
295 
296 	return fmt;
297 }
298 
299 static void print_synth_event_num_val(struct trace_seq *s,
300 				      char *print_fmt, char *name,
301 				      int size, u64 val, char *space)
302 {
303 	switch (size) {
304 	case 1:
305 		trace_seq_printf(s, print_fmt, name, (u8)val, space);
306 		break;
307 
308 	case 2:
309 		trace_seq_printf(s, print_fmt, name, (u16)val, space);
310 		break;
311 
312 	case 4:
313 		trace_seq_printf(s, print_fmt, name, (u32)val, space);
314 		break;
315 
316 	default:
317 		trace_seq_printf(s, print_fmt, name, val, space);
318 		break;
319 	}
320 }
321 
322 static enum print_line_t print_synth_event(struct trace_iterator *iter,
323 					   int flags,
324 					   struct trace_event *event)
325 {
326 	struct trace_array *tr = iter->tr;
327 	struct trace_seq *s = &iter->seq;
328 	struct synth_trace_event *entry;
329 	struct synth_event *se;
330 	unsigned int i, n_u64;
331 	char print_fmt[32];
332 	const char *fmt;
333 
334 	entry = (struct synth_trace_event *)iter->ent;
335 	se = container_of(event, struct synth_event, call.event);
336 
337 	trace_seq_printf(s, "%s: ", se->name);
338 
339 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
340 		if (trace_seq_has_overflowed(s))
341 			goto end;
342 
343 		fmt = synth_field_fmt(se->fields[i]->type);
344 
345 		/* parameter types */
346 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
347 			trace_seq_printf(s, "%s ", fmt);
348 
349 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
350 
351 		/* parameter values */
352 		if (se->fields[i]->is_string) {
353 			if (se->fields[i]->is_dynamic) {
354 				u32 offset, data_offset;
355 				char *str_field;
356 
357 				offset = (u32)entry->fields[n_u64];
358 				data_offset = offset & 0xffff;
359 
360 				str_field = (char *)entry + data_offset;
361 
362 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
363 						 STR_VAR_LEN_MAX,
364 						 str_field,
365 						 i == se->n_fields - 1 ? "" : " ");
366 				n_u64++;
367 			} else {
368 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
369 						 STR_VAR_LEN_MAX,
370 						 (char *)&entry->fields[n_u64],
371 						 i == se->n_fields - 1 ? "" : " ");
372 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
373 			}
374 		} else {
375 			struct trace_print_flags __flags[] = {
376 			    __def_gfpflag_names, {-1, NULL} };
377 			char *space = (i == se->n_fields - 1 ? "" : " ");
378 
379 			print_synth_event_num_val(s, print_fmt,
380 						  se->fields[i]->name,
381 						  se->fields[i]->size,
382 						  entry->fields[n_u64],
383 						  space);
384 
385 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
386 				trace_seq_puts(s, " (");
387 				trace_print_flags_seq(s, "|",
388 						      entry->fields[n_u64],
389 						      __flags);
390 				trace_seq_putc(s, ')');
391 			}
392 			n_u64++;
393 		}
394 	}
395 end:
396 	trace_seq_putc(s, '\n');
397 
398 	return trace_handle_return(s);
399 }
400 
401 static struct trace_event_functions synth_event_funcs = {
402 	.trace		= print_synth_event
403 };
404 
405 static unsigned int trace_string(struct synth_trace_event *entry,
406 				 struct synth_event *event,
407 				 char *str_val,
408 				 bool is_dynamic,
409 				 unsigned int data_size,
410 				 unsigned int *n_u64)
411 {
412 	unsigned int len = 0;
413 	char *str_field;
414 	int ret;
415 
416 	if (is_dynamic) {
417 		u32 data_offset;
418 
419 		data_offset = offsetof(typeof(*entry), fields);
420 		data_offset += event->n_u64 * sizeof(u64);
421 		data_offset += data_size;
422 
423 		len = kern_fetch_store_strlen((unsigned long)str_val);
424 
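		/*
		 * Pack a data_loc-style value: the string length goes in
		 * the upper 16 bits, the offset of the string data from
		 * the start of the entry in the lower 16 bits.
		 */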
425 		data_offset |= len << 16;
426 		*(u32 *)&entry->fields[*n_u64] = data_offset;
427 
428 		ret = kern_fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
429 
430 		(*n_u64)++;
431 	} else {
432 		str_field = (char *)&entry->fields[*n_u64];
433 
434 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
435 		if ((unsigned long)str_val < TASK_SIZE)
436 			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
437 		else
438 #endif
439 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
440 
441 		if (ret < 0)
442 			strcpy(str_field, FAULT_STRING);
443 
444 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
445 	}
446 
447 	return len;
448 }
449 
450 static notrace void trace_event_raw_event_synth(void *__data,
451 						u64 *var_ref_vals,
452 						unsigned int *var_ref_idx)
453 {
454 	unsigned int i, n_u64, val_idx, len, data_size = 0;
455 	struct trace_event_file *trace_file = __data;
456 	struct synth_trace_event *entry;
457 	struct trace_event_buffer fbuffer;
458 	struct trace_buffer *buffer;
459 	struct synth_event *event;
460 	int fields_size = 0;
461 
462 	event = trace_file->event_call->data;
463 
464 	if (trace_trigger_soft_disabled(trace_file))
465 		return;
466 
467 	fields_size = event->n_u64 * sizeof(u64);
468 
469 	for (i = 0; i < event->n_dynamic_fields; i++) {
470 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
471 		char *str_val;
472 
473 		val_idx = var_ref_idx[field_pos];
474 		str_val = (char *)(long)var_ref_vals[val_idx];
475 
476 		len = kern_fetch_store_strlen((unsigned long)str_val);
477 
478 		fields_size += len;
479 	}
480 
481 	/*
482 	 * Avoid ring buffer recursion detection, as this event
483 	 * is being performed within another event.
484 	 */
485 	buffer = trace_file->tr->array_buffer.buffer;
486 	ring_buffer_nest_start(buffer);
487 
488 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
489 					   sizeof(*entry) + fields_size);
490 	if (!entry)
491 		goto out;
492 
493 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
494 		val_idx = var_ref_idx[i];
495 		if (event->fields[i]->is_string) {
496 			char *str_val = (char *)(long)var_ref_vals[val_idx];
497 
498 			len = trace_string(entry, event, str_val,
499 					   event->fields[i]->is_dynamic,
500 					   data_size, &n_u64);
501 			data_size += len; /* only dynamic string increments */
502 		} else {
503 			struct synth_field *field = event->fields[i];
504 			u64 val = var_ref_vals[val_idx];
505 
506 			switch (field->size) {
507 			case 1:
508 				*(u8 *)&entry->fields[n_u64] = (u8)val;
509 				break;
510 
511 			case 2:
512 				*(u16 *)&entry->fields[n_u64] = (u16)val;
513 				break;
514 
515 			case 4:
516 				*(u32 *)&entry->fields[n_u64] = (u32)val;
517 				break;
518 
519 			default:
520 				entry->fields[n_u64] = val;
521 				break;
522 			}
523 			n_u64++;
524 		}
525 	}
526 
527 	trace_event_buffer_commit(&fbuffer);
528 out:
529 	ring_buffer_nest_end(buffer);
530 }
531 
532 static void free_synth_event_print_fmt(struct trace_event_call *call)
533 {
534 	if (call) {
535 		kfree(call->print_fmt);
536 		call->print_fmt = NULL;
537 	}
538 }
539 
540 static int __set_synth_event_print_fmt(struct synth_event *event,
541 				       char *buf, int len)
542 {
543 	const char *fmt;
544 	int pos = 0;
545 	int i;
546 
547 	/* When len=0, we just calculate the needed length */
548 #define LEN_OR_ZERO (len ? len - pos : 0)
549 
550 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
551 	for (i = 0; i < event->n_fields; i++) {
552 		fmt = synth_field_fmt(event->fields[i]->type);
553 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
554 				event->fields[i]->name, fmt,
555 				i == event->n_fields - 1 ? "" : ", ");
556 	}
557 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
558 
559 	for (i = 0; i < event->n_fields; i++) {
560 		if (event->fields[i]->is_string &&
561 		    event->fields[i]->is_dynamic)
562 			pos += snprintf(buf + pos, LEN_OR_ZERO,
563 				", __get_str(%s)", event->fields[i]->name);
564 		else
565 			pos += snprintf(buf + pos, LEN_OR_ZERO,
566 					", REC->%s", event->fields[i]->name);
567 	}
568 
569 #undef LEN_OR_ZERO
570 
571 	/* return the length of print_fmt */
572 	return pos;
573 }
574 
575 static int set_synth_event_print_fmt(struct trace_event_call *call)
576 {
577 	struct synth_event *event = call->data;
578 	char *print_fmt;
579 	int len;
580 
581 	/* First: called with 0 length to calculate the needed length */
582 	len = __set_synth_event_print_fmt(event, NULL, 0);
583 
584 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
585 	if (!print_fmt)
586 		return -ENOMEM;
587 
588 	/* Second: actually write the @print_fmt */
589 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
590 	call->print_fmt = print_fmt;
591 
592 	return 0;
593 }
594 
595 static void free_synth_field(struct synth_field *field)
596 {
597 	kfree(field->type);
598 	kfree(field->name);
599 	kfree(field);
600 }
601 
602 static int check_field_version(const char *prefix, const char *field_type,
603 			       const char *field_name)
604 {
605 	/*
606 	 * For backward compatibility, the old synthetic event command
607 	 * format did not require semicolons, and in order to not
608 	 * break user space, that old format must still work. If a new
609 	 * feature is added, then the format that uses the new feature
610 	 * will be required to have semicolons, as nothing that uses
611 	 * the old format would be using the new, yet to be created,
612 	 * feature. When a new feature is added, this will detect it,
613 	 * and return a number greater than 1, and require the format
614 	 * to use semicolons.
615 	 */
616 	return 1;
617 }
618 
619 static struct synth_field *parse_synth_field(int argc, char **argv,
620 					     int *consumed, int *field_version)
621 {
622 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
623 	struct synth_field *field;
624 	int len, ret = -ENOMEM;
625 	struct seq_buf s;
626 	ssize_t size;
627 
628 	if (!strcmp(field_type, "unsigned")) {
629 		if (argc < 3) {
630 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
631 			return ERR_PTR(-EINVAL);
632 		}
633 		prefix = "unsigned ";
634 		field_type = argv[1];
635 		field_name = argv[2];
636 		*consumed += 3;
637 	} else {
638 		field_name = argv[1];
639 		*consumed += 2;
640 	}
641 
642 	if (!field_name) {
643 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
644 		return ERR_PTR(-EINVAL);
645 	}
646 
647 	*field_version = check_field_version(prefix, field_type, field_name);
648 
649 	field = kzalloc(sizeof(*field), GFP_KERNEL);
650 	if (!field)
651 		return ERR_PTR(-ENOMEM);
652 
653 	len = strlen(field_name);
654 	array = strchr(field_name, '[');
655 	if (array)
656 		len -= strlen(array);
657 
658 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
659 	if (!field->name)
660 		goto free;
661 
662 	if (!is_good_name(field->name)) {
663 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
664 		ret = -EINVAL;
665 		goto free;
666 	}
667 
668 	len = strlen(field_type) + 1;
669 
670 	if (array)
671 		len += strlen(array);
672 
673 	if (prefix)
674 		len += strlen(prefix);
675 
676 	field->type = kzalloc(len, GFP_KERNEL);
677 	if (!field->type)
678 		goto free;
679 
680 	seq_buf_init(&s, field->type, len);
681 	if (prefix)
682 		seq_buf_puts(&s, prefix);
683 	seq_buf_puts(&s, field_type);
684 	if (array)
685 		seq_buf_puts(&s, array);
686 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
687 		goto free;
688 
689 	s.buffer[s.len] = '\0';
690 
691 	size = synth_field_size(field->type);
692 	if (size < 0) {
693 		if (array)
694 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
695 		else
696 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
697 		ret = -EINVAL;
698 		goto free;
699 	} else if (size == 0) {
700 		if (synth_field_is_string(field->type)) {
701 			char *type;
702 
703 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
704 			type = kzalloc(len, GFP_KERNEL);
705 			if (!type)
706 				goto free;
707 
708 			seq_buf_init(&s, type, len);
709 			seq_buf_puts(&s, "__data_loc ");
710 			seq_buf_puts(&s, field->type);
711 
712 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
713 				goto free;
714 			s.buffer[s.len] = '\0';
715 
716 			kfree(field->type);
717 			field->type = type;
718 
719 			field->is_dynamic = true;
720 			size = sizeof(u64);
721 		} else {
722 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
723 			ret = -EINVAL;
724 			goto free;
725 		}
726 	}
727 	field->size = size;
728 
729 	if (synth_field_is_string(field->type))
730 		field->is_string = true;
731 
732 	field->is_signed = synth_field_signed(field->type);
733  out:
734 	return field;
735  free:
736 	free_synth_field(field);
737 	field = ERR_PTR(ret);
738 	goto out;
739 }
740 
741 static void free_synth_tracepoint(struct tracepoint *tp)
742 {
743 	if (!tp)
744 		return;
745 
746 	kfree(tp->name);
747 	kfree(tp);
748 }
749 
750 static struct tracepoint *alloc_synth_tracepoint(char *name)
751 {
752 	struct tracepoint *tp;
753 
754 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
755 	if (!tp)
756 		return ERR_PTR(-ENOMEM);
757 
758 	tp->name = kstrdup(name, GFP_KERNEL);
759 	if (!tp->name) {
760 		kfree(tp);
761 		return ERR_PTR(-ENOMEM);
762 	}
763 
764 	return tp;
765 }
766 
767 struct synth_event *find_synth_event(const char *name)
768 {
769 	struct dyn_event *pos;
770 	struct synth_event *event;
771 
772 	for_each_dyn_event(pos) {
773 		if (!is_synth_event(pos))
774 			continue;
775 		event = to_synth_event(pos);
776 		if (strcmp(event->name, name) == 0)
777 			return event;
778 	}
779 
780 	return NULL;
781 }
782 
783 static struct trace_event_fields synth_event_fields_array[] = {
784 	{ .type = TRACE_FUNCTION_TYPE,
785 	  .define_fields = synth_event_define_fields },
786 	{}
787 };
788 
789 static int register_synth_event(struct synth_event *event)
790 {
791 	struct trace_event_call *call = &event->call;
792 	int ret = 0;
793 
794 	event->call.class = &event->class;
795 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
796 	if (!event->class.system) {
797 		ret = -ENOMEM;
798 		goto out;
799 	}
800 
801 	event->tp = alloc_synth_tracepoint(event->name);
802 	if (IS_ERR(event->tp)) {
803 		ret = PTR_ERR(event->tp);
804 		event->tp = NULL;
805 		goto out;
806 	}
807 
808 	INIT_LIST_HEAD(&call->class->fields);
809 	call->event.funcs = &synth_event_funcs;
810 	call->class->fields_array = synth_event_fields_array;
811 
812 	ret = register_trace_event(&call->event);
813 	if (!ret) {
814 		ret = -ENODEV;
815 		goto out;
816 	}
817 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
818 	call->class->reg = trace_event_reg;
819 	call->class->probe = trace_event_raw_event_synth;
820 	call->data = event;
821 	call->tp = event->tp;
822 
823 	ret = trace_add_event_call(call);
824 	if (ret) {
825 		pr_warn("Failed to register synthetic event: %s\n",
826 			trace_event_name(call));
827 		goto err;
828 	}
829 
830 	ret = set_synth_event_print_fmt(call);
831 	if (ret < 0) {
832 		trace_remove_event_call(call);
833 		goto err;
834 	}
835  out:
836 	return ret;
837  err:
838 	unregister_trace_event(&call->event);
839 	goto out;
840 }
841 
842 static int unregister_synth_event(struct synth_event *event)
843 {
844 	struct trace_event_call *call = &event->call;
845 	int ret;
846 
847 	ret = trace_remove_event_call(call);
848 
849 	return ret;
850 }
851 
852 static void free_synth_event(struct synth_event *event)
853 {
854 	unsigned int i;
855 
856 	if (!event)
857 		return;
858 
859 	for (i = 0; i < event->n_fields; i++)
860 		free_synth_field(event->fields[i]);
861 
862 	kfree(event->fields);
863 	kfree(event->dynamic_fields);
864 	kfree(event->name);
865 	kfree(event->class.system);
866 	free_synth_tracepoint(event->tp);
867 	free_synth_event_print_fmt(&event->call);
868 	kfree(event);
869 }
870 
871 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
872 					     struct synth_field **fields)
873 {
874 	unsigned int i, j, n_dynamic_fields = 0;
875 	struct synth_event *event;
876 
877 	event = kzalloc(sizeof(*event), GFP_KERNEL);
878 	if (!event) {
879 		event = ERR_PTR(-ENOMEM);
880 		goto out;
881 	}
882 
883 	event->name = kstrdup(name, GFP_KERNEL);
884 	if (!event->name) {
885 		kfree(event);
886 		event = ERR_PTR(-ENOMEM);
887 		goto out;
888 	}
889 
890 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
891 	if (!event->fields) {
892 		free_synth_event(event);
893 		event = ERR_PTR(-ENOMEM);
894 		goto out;
895 	}
896 
897 	for (i = 0; i < n_fields; i++)
898 		if (fields[i]->is_dynamic)
899 			n_dynamic_fields++;
900 
901 	if (n_dynamic_fields) {
902 		event->dynamic_fields = kcalloc(n_dynamic_fields,
903 						sizeof(*event->dynamic_fields),
904 						GFP_KERNEL);
905 		if (!event->dynamic_fields) {
906 			free_synth_event(event);
907 			event = ERR_PTR(-ENOMEM);
908 			goto out;
909 		}
910 	}
911 
912 	dyn_event_init(&event->devent, &synth_event_ops);
913 
914 	for (i = 0, j = 0; i < n_fields; i++) {
915 		fields[i]->field_pos = i;
916 		event->fields[i] = fields[i];
917 
918 		if (fields[i]->is_dynamic)
919 			event->dynamic_fields[j++] = fields[i];
920 	}
921 	event->n_dynamic_fields = j;
922 	event->n_fields = n_fields;
923  out:
924 	return event;
925 }
926 
927 static int synth_event_check_arg_fn(void *data)
928 {
929 	struct dynevent_arg_pair *arg_pair = data;
930 	int size;
931 
932 	size = synth_field_size((char *)arg_pair->lhs);
933 	if (size == 0) {
934 		if (strstr((char *)arg_pair->lhs, "["))
935 			return 0;
936 	}
937 
938 	return size ? 0 : -EINVAL;
939 }
940 
941 /**
942  * synth_event_add_field - Add a new field to a synthetic event cmd
943  * @cmd: A pointer to the dynevent_cmd struct representing the new event
944  * @type: The type of the new field to add
945  * @name: The name of the new field to add
946  *
947  * Add a new field to a synthetic event cmd object.  Field ordering is in
948  * the same order the fields are added.
949  *
950  * See synth_field_size() for available types. If field_name contains
951  * [n] the field is considered to be an array.
952  *
953  * Return: 0 if successful, error otherwise.
954  */
955 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
956 			  const char *name)
957 {
958 	struct dynevent_arg_pair arg_pair;
959 	int ret;
960 
961 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
962 		return -EINVAL;
963 
964 	if (!type || !name)
965 		return -EINVAL;
966 
967 	dynevent_arg_pair_init(&arg_pair, 0, ';');
968 
969 	arg_pair.lhs = type;
970 	arg_pair.rhs = name;
971 
972 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
973 	if (ret)
974 		return ret;
975 
976 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
977 		ret = -EINVAL;
978 
979 	return ret;
980 }
981 EXPORT_SYMBOL_GPL(synth_event_add_field);
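
/*
 * Example (illustrative sketch only; error handling is trimmed and the
 * event/field names are made up):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "test_latency", THIS_MODULE);
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */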
982 
983 /**
984  * synth_event_add_field_str - Add a new field to a synthetic event cmd
985  * @cmd: A pointer to the dynevent_cmd struct representing the new event
986  * @type_name: The type and name of the new field to add, as a single string
987  *
988  * Add a new field to a synthetic event cmd object, as a single
989  * string.  The @type_name string is expected to be of the form 'type
990  * name', to which a ';' will be appended.  No sanity checking is done -
991  * what's passed in is assumed to already be well-formed.  Field
992  * ordering is in the same order the fields are added.
993  *
994  * See synth_field_size() for available types. If field_name contains
995  * [n] the field is considered to be an array.
996  *
997  * Return: 0 if successful, error otherwise.
998  */
999 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1000 {
1001 	struct dynevent_arg arg;
1002 	int ret;
1003 
1004 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1005 		return -EINVAL;
1006 
1007 	if (!type_name)
1008 		return -EINVAL;
1009 
1010 	dynevent_arg_init(&arg, ';');
1011 
1012 	arg.str = type_name;
1013 
1014 	ret = dynevent_arg_add(cmd, &arg, NULL);
1015 	if (ret)
1016 		return ret;
1017 
1018 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1019 		ret = -EINVAL;
1020 
1021 	return ret;
1022 }
1023 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
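
/*
 * Example (illustrative only): the same field could be added as a
 * single pre-formed string, assuming 'cmd' was set up as in the sketch
 * above:
 *
 *	ret = synth_event_add_field_str(&cmd, "u64 lat_ns");
 */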
1024 
1025 /**
1026  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1027  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1028  * @fields: An array of type/name field descriptions
1029  * @n_fields: The number of field descriptions contained in the fields array
1030  *
1031  * Add a new set of fields to a synthetic event cmd object.  The event
1032  * fields that will be defined for the event should be passed in as an
1033  * array of struct synth_field_desc, and the number of elements in the
1034  * array passed in as n_fields.  Field ordering will retain the
1035  * ordering given in the fields array.
1036  *
1037  * See synth_field_size() for available types. If field_name contains
1038  * [n] the field is considered to be an array.
1039  *
1040  * Return: 0 if successful, error otherwise.
1041  */
1042 int synth_event_add_fields(struct dynevent_cmd *cmd,
1043 			   struct synth_field_desc *fields,
1044 			   unsigned int n_fields)
1045 {
1046 	unsigned int i;
1047 	int ret = 0;
1048 
1049 	for (i = 0; i < n_fields; i++) {
1050 		if (fields[i].type == NULL || fields[i].name == NULL) {
1051 			ret = -EINVAL;
1052 			break;
1053 		}
1054 
1055 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1056 		if (ret)
1057 			break;
1058 	}
1059 
1060 	return ret;
1061 }
1062 EXPORT_SYMBOL_GPL(synth_event_add_fields);
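
/*
 * Example (illustrative only; the field descriptions are hypothetical):
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "delay_ns" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, wakeup_fields,
 *				     ARRAY_SIZE(wakeup_fields));
 */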
1063 
1064 /**
1065  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1066  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1067  * @name: The name of the synthetic event
1068  * @mod: The module creating the event, NULL if not created from a module
1069  * @args: Variable number of arg (pairs), one pair for each field
1070  *
1071  * NOTE: Users normally won't want to call this function directly, but
1072  * rather use the synth_event_gen_cmd_start() wrapper, which
1073  * automatically adds a NULL to the end of the arg list.  If this
1074  * function is used directly, make sure the last arg in the variable
1075  * arg list is NULL.
1076  *
1077  * Generate a synthetic event command to be executed by
1078  * synth_event_gen_cmd_end().  This function can be used to generate
1079  * the complete command or only the first part of it; in the latter
1080  * case, synth_event_add_field(), synth_event_add_field_str(), or
1081  * synth_event_add_fields() can be used to add more fields following
1082  * this.
1083  *
1084  * There should be an even number of variable args, each pair consisting
1085  * of a type followed by a field name.
1086  *
1087  * See synth_field_size() for available types. If field_name contains
1088  * [n] the field is considered to be an array.
1089  *
1090  * Return: 0 if successful, error otherwise.
1091  */
1092 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1093 				struct module *mod, ...)
1094 {
1095 	struct dynevent_arg arg;
1096 	va_list args;
1097 	int ret;
1098 
1099 	cmd->event_name = name;
1100 	cmd->private_data = mod;
1101 
1102 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1103 		return -EINVAL;
1104 
1105 	dynevent_arg_init(&arg, 0);
1106 	arg.str = name;
1107 	ret = dynevent_arg_add(cmd, &arg, NULL);
1108 	if (ret)
1109 		return ret;
1110 
1111 	va_start(args, mod);
1112 	for (;;) {
1113 		const char *type, *name;
1114 
1115 		type = va_arg(args, const char *);
1116 		if (!type)
1117 			break;
1118 		name = va_arg(args, const char *);
1119 		if (!name)
1120 			break;
1121 
1122 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1123 			ret = -EINVAL;
1124 			break;
1125 		}
1126 
1127 		ret = synth_event_add_field(cmd, type, name);
1128 		if (ret)
1129 			break;
1130 	}
1131 	va_end(args);
1132 
1133 	return ret;
1134 }
1135 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
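
/*
 * Example using the synth_event_gen_cmd_start() wrapper (illustrative
 * only; event and field names are made up):
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid",
 *					"u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */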
1136 
1137 /**
1138  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1139  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1140  * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
1141  * @fields: An array of type/name field descriptions
1142  * @n_fields: The number of field descriptions contained in the fields array
1143  *
1144  * Generate a synthetic event command to be executed by
1145  * synth_event_gen_cmd_end().  This function can be used to generate
1146  * the complete command or only the first part of it; in the latter
1147  * case, synth_event_add_field(), synth_event_add_field_str(), or
1148  * synth_event_add_fields() can be used to add more fields following
1149  * this.
1150  *
1151  * The event fields that will be defined for the event should be
1152  * passed in as an array of struct synth_field_desc, and the number of
1153  * elements in the array passed in as n_fields.  Field ordering will
1154  * retain the ordering given in the fields array.
1155  *
1156  * See synth_field_size() for available types. If field_name contains
1157  * [n] the field is considered to be an array.
1158  *
1159  * Return: 0 if successful, error otherwise.
1160  */
1161 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1162 				    struct module *mod,
1163 				    struct synth_field_desc *fields,
1164 				    unsigned int n_fields)
1165 {
1166 	struct dynevent_arg arg;
1167 	unsigned int i;
1168 	int ret = 0;
1169 
1170 	cmd->event_name = name;
1171 	cmd->private_data = mod;
1172 
1173 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1174 		return -EINVAL;
1175 
1176 	if (n_fields > SYNTH_FIELDS_MAX)
1177 		return -EINVAL;
1178 
1179 	dynevent_arg_init(&arg, 0);
1180 	arg.str = name;
1181 	ret = dynevent_arg_add(cmd, &arg, NULL);
1182 	if (ret)
1183 		return ret;
1184 
1185 	for (i = 0; i < n_fields; i++) {
1186 		if (fields[i].type == NULL || fields[i].name == NULL)
1187 			return -EINVAL;
1188 
1189 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1190 		if (ret)
1191 			break;
1192 	}
1193 
1194 	return ret;
1195 }
1196 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
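
/*
 * Example (illustrative only), generating the command from an array of
 * field descriptions such as the hypothetical wakeup_fields[] above:
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
 *					      wakeup_fields,
 *					      ARRAY_SIZE(wakeup_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */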
1197 
1198 static int __create_synth_event(const char *name, const char *raw_fields)
1199 {
1200 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1201 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1202 	int consumed, cmd_version = 1, n_fields_this_loop;
1203 	int i, argc, n_fields = 0, ret = 0;
1204 	struct synth_event *event = NULL;
1205 
1206 	/*
1207 	 * Argument syntax:
1208 	 *  - Add synthetic event: <event_name> field[;field] ...
1209 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1210 	 *      where 'field' = type field_name
1211 	 */
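	/*
	 * For example, from user space (illustrative):
	 *
	 *   # echo 'wakeup_latency u64 lat; pid_t pid' >> \
	 *		/sys/kernel/tracing/synthetic_events
	 *   # echo '!wakeup_latency' >> /sys/kernel/tracing/synthetic_events
	 */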
1212 
1213 	if (name[0] == '\0') {
1214 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1215 		return -EINVAL;
1216 	}
1217 
1218 	if (!is_good_name(name)) {
1219 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1220 		return -EINVAL;
1221 	}
1222 
1223 	mutex_lock(&event_mutex);
1224 
1225 	event = find_synth_event(name);
1226 	if (event) {
1227 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1228 		ret = -EEXIST;
1229 		goto err;
1230 	}
1231 
1232 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1233 	if (!tmp_fields) {
1234 		ret = -ENOMEM;
1235 		goto err;
1236 	}
1237 
1238 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1239 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1240 		if (!argv) {
1241 			ret = -ENOMEM;
1242 			goto err;
1243 		}
1244 
1245 		if (!argc) {
1246 			argv_free(argv);
1247 			continue;
1248 		}
1249 
1250 		n_fields_this_loop = 0;
1251 		consumed = 0;
1252 		while (argc > consumed) {
1253 			int field_version;
1254 
1255 			field = parse_synth_field(argc - consumed,
1256 						  argv + consumed, &consumed,
1257 						  &field_version);
1258 			if (IS_ERR(field)) {
1259 				ret = PTR_ERR(field);
1260 				goto err_free_arg;
1261 			}
1262 
1263 			/*
1264 			 * Track the highest version of any field we
1265 			 * found in the command.
1266 			 */
1267 			if (field_version > cmd_version)
1268 				cmd_version = field_version;
1269 
1270 			/*
1271 			 * Now sort out what is and isn't valid for
1272 			 * each supported version.
1273 			 *
1274 			 * If we see more than 1 field per loop, it
1275 			 * means we have multiple fields between
1276 			 * semicolons, and that's something we no
1277 			 * longer support in a version 2 or greater
1278 			 * command.
1279 			 */
1280 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1281 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1282 				ret = -EINVAL;
1283 				goto err_free_arg;
1284 			}
1285 
1286 			fields[n_fields++] = field;
1287 			if (n_fields == SYNTH_FIELDS_MAX) {
1288 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1289 				ret = -EINVAL;
1290 				goto err_free_arg;
1291 			}
1292 
1293 			n_fields_this_loop++;
1294 		}
1295 		argv_free(argv);
1296 
1297 		if (consumed < argc) {
1298 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1299 			ret = -EINVAL;
1300 			goto err;
1301 		}
1302 
1303 	}
1304 
1305 	if (n_fields == 0) {
1306 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1307 		ret = -EINVAL;
1308 		goto err;
1309 	}
1310 
1311 	event = alloc_synth_event(name, n_fields, fields);
1312 	if (IS_ERR(event)) {
1313 		ret = PTR_ERR(event);
1314 		event = NULL;
1315 		goto err;
1316 	}
1317 	ret = register_synth_event(event);
1318 	if (!ret)
1319 		dyn_event_add(&event->devent, &event->call);
1320 	else
1321 		free_synth_event(event);
1322  out:
1323 	mutex_unlock(&event_mutex);
1324 
1325 	kfree(saved_fields);
1326 
1327 	return ret;
1328  err_free_arg:
1329 	argv_free(argv);
1330  err:
1331 	for (i = 0; i < n_fields; i++)
1332 		free_synth_field(fields[i]);
1333 
1334 	goto out;
1335 }
1336 
1337 /**
1338  * synth_event_create - Create a new synthetic event
1339  * @name: The name of the new synthetic event
1340  * @fields: An array of type/name field descriptions
1341  * @n_fields: The number of field descriptions contained in the fields array
1342  * @mod: The module creating the event, NULL if not created from a module
1343  *
1344  * Create a new synthetic event with the given name under the
1345  * trace/events/synthetic/ directory.  The event fields that will be
1346  * defined for the event should be passed in as an array of struct
1347  * synth_field_desc, and the number of elements in the array passed in as
1348  * n_fields. Field ordering will retain the ordering given in the
1349  * fields array.
1350  *
1351  * If the new synthetic event is being created from a module, the mod
1352  * param must be non-NULL.  This will ensure that the trace buffer
1353  * won't contain unreadable events.
1354  *
1355  * The new synth event should be deleted using synth_event_delete()
1356  * function.  The new synthetic event can be generated from modules or
1357  * other kernel code using trace_synth_event() and related functions.
1358  *
1359  * Return: 0 if successful, error otherwise.
1360  */
1361 int synth_event_create(const char *name, struct synth_field_desc *fields,
1362 		       unsigned int n_fields, struct module *mod)
1363 {
1364 	struct dynevent_cmd cmd;
1365 	char *buf;
1366 	int ret;
1367 
1368 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1369 	if (!buf)
1370 		return -ENOMEM;
1371 
1372 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1373 
1374 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1375 					      fields, n_fields);
1376 	if (ret)
1377 		goto out;
1378 
1379 	ret = synth_event_gen_cmd_end(&cmd);
1380  out:
1381 	kfree(buf);
1382 
1383 	return ret;
1384 }
1385 EXPORT_SYMBOL_GPL(synth_event_create);
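
/*
 * Example (illustrative sketch; error handling elided, event name and
 * fields are hypothetical):
 *
 *	static struct synth_field_desc my_fields[] = {
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "pid_t",	.name = "pid" },
 *	};
 *
 *	ret = synth_event_create("my_wakeup_lat", my_fields,
 *				 ARRAY_SIZE(my_fields), THIS_MODULE);
 */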
1386 
1387 static int destroy_synth_event(struct synth_event *se)
1388 {
1389 	int ret;
1390 
1391 	if (se->ref)
1392 		return -EBUSY;
1393 
1394 	if (trace_event_dyn_busy(&se->call))
1395 		return -EBUSY;
1396 
1397 	ret = unregister_synth_event(se);
1398 	if (!ret) {
1399 		dyn_event_remove(&se->devent);
1400 		free_synth_event(se);
1401 	}
1402 
1403 	return ret;
1404 }
1405 
1406 /**
1407  * synth_event_delete - Delete a synthetic event
1408  * @event_name: The name of the synthetic event to delete
1409  *
1410  * Delete a synthetic event that was created with synth_event_create().
1411  *
1412  * Return: 0 if successful, error otherwise.
1413  */
1414 int synth_event_delete(const char *event_name)
1415 {
1416 	struct synth_event *se = NULL;
1417 	struct module *mod = NULL;
1418 	int ret = -ENOENT;
1419 
1420 	mutex_lock(&event_mutex);
1421 	se = find_synth_event(event_name);
1422 	if (se) {
1423 		mod = se->mod;
1424 		ret = destroy_synth_event(se);
1425 	}
1426 	mutex_unlock(&event_mutex);
1427 
1428 	if (mod) {
1429 		mutex_lock(&trace_types_lock);
1430 		/*
1431 		 * It is safest to reset the ring buffer if the module
1432 		 * being unloaded registered any events that were
1433 		 * used. The only worry is if a new module gets
1434 		 * loaded, and takes on the same id as the events of
1435 		 * this module. When printing out the buffer, traced
1436 		 * events left over from this module may be passed to
1437 		 * the new module events and unexpected results may
1438 		 * occur.
1439 		 */
1440 		tracing_reset_all_online_cpus();
1441 		mutex_unlock(&trace_types_lock);
1442 	}
1443 
1444 	return ret;
1445 }
1446 EXPORT_SYMBOL_GPL(synth_event_delete);
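
/*
 * Example (illustrative only): tearing down the hypothetical event
 * created above, e.g. from a module exit path:
 *
 *	ret = synth_event_delete("my_wakeup_lat");
 */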
1447 
1448 static int check_command(const char *raw_command)
1449 {
1450 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1451 	int argc, ret = 0;
1452 
1453 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1454 	if (!cmd)
1455 		return -ENOMEM;
1456 
1457 	name_and_field = strsep(&cmd, ";");
1458 	if (!name_and_field) {
1459 		ret = -EINVAL;
1460 		goto free;
1461 	}
1462 
1463 	if (name_and_field[0] == '!')
1464 		goto free;
1465 
1466 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1467 	if (!argv) {
1468 		ret = -ENOMEM;
1469 		goto free;
1470 	}
1471 	argv_free(argv);
1472 
1473 	if (argc < 3)
1474 		ret = -EINVAL;
1475 free:
1476 	kfree(saved_cmd);
1477 
1478 	return ret;
1479 }
1480 
1481 static int create_or_delete_synth_event(const char *raw_command)
1482 {
1483 	char *name = NULL, *fields, *p;
1484 	int ret = 0;
1485 
1486 	raw_command = skip_spaces(raw_command);
1487 	if (raw_command[0] == '\0')
1488 		return ret;
1489 
1490 	last_cmd_set(raw_command);
1491 
1492 	ret = check_command(raw_command);
1493 	if (ret) {
1494 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1495 		return ret;
1496 	}
1497 
1498 	p = strpbrk(raw_command, " \t");
1499 	if (!p && raw_command[0] != '!') {
1500 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1501 		ret = -EINVAL;
1502 		goto free;
1503 	}
1504 
1505 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1506 	if (!name)
1507 		return -ENOMEM;
1508 
1509 	if (name[0] == '!') {
1510 		ret = synth_event_delete(name + 1);
1511 		goto free;
1512 	}
1513 
1514 	fields = skip_spaces(p);
1515 
1516 	ret = __create_synth_event(name, fields);
1517 free:
1518 	kfree(name);
1519 
1520 	return ret;
1521 }
1522 
1523 static int synth_event_run_command(struct dynevent_cmd *cmd)
1524 {
1525 	struct synth_event *se;
1526 	int ret;
1527 
1528 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1529 	if (ret)
1530 		return ret;
1531 
1532 	se = find_synth_event(cmd->event_name);
1533 	if (WARN_ON(!se))
1534 		return -ENOENT;
1535 
1536 	se->mod = cmd->private_data;
1537 
1538 	return ret;
1539 }
1540 
1541 /**
1542  * synth_event_cmd_init - Initialize a synthetic event command object
1543  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1544  * @buf: A pointer to the buffer used to build the command
1545  * @maxlen: The length of the buffer passed in @buf
1546  *
1547  * Initialize a synthetic event command object.  Use this before
1548  * calling any of the other dynevent_cmd functions.
1549  */
1550 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1551 {
1552 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1553 			  synth_event_run_command);
1554 }
1555 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
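
/*
 * Example (illustrative only): the dynevent_cmd is backed by a
 * caller-provided buffer, typically MAX_DYNEVENT_CMD_LEN bytes:
 *
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	if (buf)
 *		synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 */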
1556 
1557 static inline int
1558 __synth_event_trace_init(struct trace_event_file *file,
1559 			 struct synth_event_trace_state *trace_state)
1560 {
1561 	int ret = 0;
1562 
1563 	memset(trace_state, '\0', sizeof(*trace_state));
1564 
1565 	/*
1566 	 * Normal event tracing doesn't get called at all unless the
1567 	 * ENABLED bit is set (which attaches the probe thus allowing
1568 	 * this code to be called, etc).  Because this is called
1569 	 * directly by the user, we don't have that but we still need
1570 	 * to honor not logging when disabled.  For the iterated
1571 	 * trace case, we save the enabled state upon start and just
1572 	 * ignore the following data calls.
1573 	 */
1574 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1575 	    trace_trigger_soft_disabled(file)) {
1576 		trace_state->disabled = true;
1577 		ret = -ENOENT;
1578 		goto out;
1579 	}
1580 
1581 	trace_state->event = file->event_call->data;
1582 out:
1583 	return ret;
1584 }
1585 
1586 static inline int
1587 __synth_event_trace_start(struct trace_event_file *file,
1588 			  struct synth_event_trace_state *trace_state,
1589 			  int dynamic_fields_size)
1590 {
1591 	int entry_size, fields_size = 0;
1592 	int ret = 0;
1593 
1594 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1595 	fields_size += dynamic_fields_size;
1596 
1597 	/*
1598 	 * Avoid ring buffer recursion detection, as this event
1599 	 * is being performed within another event.
1600 	 */
1601 	trace_state->buffer = file->tr->array_buffer.buffer;
1602 	ring_buffer_nest_start(trace_state->buffer);
1603 
1604 	entry_size = sizeof(*trace_state->entry) + fields_size;
1605 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1606 							file,
1607 							entry_size);
1608 	if (!trace_state->entry) {
1609 		ring_buffer_nest_end(trace_state->buffer);
1610 		ret = -EINVAL;
1611 	}
1612 
1613 	return ret;
1614 }
1615 
1616 static inline void
1617 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1618 {
1619 	trace_event_buffer_commit(&trace_state->fbuffer);
1620 
1621 	ring_buffer_nest_end(trace_state->buffer);
1622 }
1623 
1624 /**
1625  * synth_event_trace - Trace a synthetic event
1626  * @file: The trace_event_file representing the synthetic event
1627  * @n_vals: The number of values in vals
1628  * @args: Variable number of args containing the event values
1629  *
1630  * Trace a synthetic event using the values passed in the variable
1631  * argument list.
1632  *
1633  * The argument list should be a list of 'n_vals' u64 values.  The number
1634  * of vals must match the number of fields in the synthetic event, and
1635  * must be in the same order as the synthetic event fields.
1636  *
1637  * All vals should be cast to u64, and string vals are just pointers
1638  * to strings, cast to u64.  Strings will be copied into space
1639  * reserved in the event for the string, using these pointers.
1640  *
1641  * Return: 0 on success, err otherwise.
1642  */
1643 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1644 {
1645 	unsigned int i, n_u64, len, data_size = 0;
1646 	struct synth_event_trace_state state;
1647 	va_list args;
1648 	int ret;
1649 
1650 	ret = __synth_event_trace_init(file, &state);
1651 	if (ret) {
1652 		if (ret == -ENOENT)
1653 			ret = 0; /* just disabled, not really an error */
1654 		return ret;
1655 	}
1656 
1657 	if (state.event->n_dynamic_fields) {
1658 		va_start(args, n_vals);
1659 
1660 		for (i = 0; i < state.event->n_fields; i++) {
1661 			u64 val = va_arg(args, u64);
1662 
1663 			if (state.event->fields[i]->is_string &&
1664 			    state.event->fields[i]->is_dynamic) {
1665 				char *str_val = (char *)(long)val;
1666 
1667 				data_size += strlen(str_val) + 1;
1668 			}
1669 		}
1670 
1671 		va_end(args);
1672 	}
1673 
1674 	ret = __synth_event_trace_start(file, &state, data_size);
1675 	if (ret)
1676 		return ret;
1677 
1678 	if (n_vals != state.event->n_fields) {
1679 		ret = -EINVAL;
1680 		goto out;
1681 	}
1682 
1683 	data_size = 0;
1684 
1685 	va_start(args, n_vals);
1686 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1687 		u64 val;
1688 
1689 		val = va_arg(args, u64);
1690 
1691 		if (state.event->fields[i]->is_string) {
1692 			char *str_val = (char *)(long)val;
1693 
1694 			len = trace_string(state.entry, state.event, str_val,
1695 					   state.event->fields[i]->is_dynamic,
1696 					   data_size, &n_u64);
1697 			data_size += len; /* only dynamic string increments */
1698 		} else {
1699 			struct synth_field *field = state.event->fields[i];
1700 
1701 			switch (field->size) {
1702 			case 1:
1703 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1704 				break;
1705 
1706 			case 2:
1707 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1708 				break;
1709 
1710 			case 4:
1711 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1712 				break;
1713 
1714 			default:
1715 				state.entry->fields[n_u64] = val;
1716 				break;
1717 			}
1718 			n_u64++;
1719 		}
1720 	}
1721 	va_end(args);
1722 out:
1723 	__synth_event_trace_end(&state);
1724 
1725 	return ret;
1726 }
1727 EXPORT_SYMBOL_GPL(synth_event_trace);
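
/*
 * Example (illustrative only): tracing the hypothetical "my_wakeup_lat"
 * event sketched above, where 'file' would typically be looked up once
 * via trace_get_event_file(NULL, "synthetic", "my_wakeup_lat"):
 *
 *	ret = synth_event_trace(file, 2,
 *				(u64)lat_ns, (u64)pid);
 */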
1728 
1729 /**
1730  * synth_event_trace_array - Trace a synthetic event from an array
1731  * @file: The trace_event_file representing the synthetic event
1732  * @vals: Array of values
1733  * @n_vals: The number of values in vals
1734  *
1735  * Trace a synthetic event using the values passed in as 'vals'.
1736  *
1737  * The 'vals' array is just an array of 'n_vals' u64 values.  The number of
1738  * vals must match the number of fields in the synthetic event, and
1739  * must be in the same order as the synthetic event fields.
1740  *
1741  * All vals should be cast to u64, and string vals are just pointers
1742  * to strings, cast to u64.  Strings will be copied into space
1743  * reserved in the event for the string, using these pointers.
1744  *
1745  * Return: 0 on success, err otherwise.
1746  */
1747 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1748 			    unsigned int n_vals)
1749 {
1750 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1751 	struct synth_event_trace_state state;
1752 	char *str_val;
1753 	int ret;
1754 
1755 	ret = __synth_event_trace_init(file, &state);
1756 	if (ret) {
1757 		if (ret == -ENOENT)
1758 			ret = 0; /* just disabled, not really an error */
1759 		return ret;
1760 	}
1761 
1762 	if (state.event->n_dynamic_fields) {
1763 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1764 			field_pos = state.event->dynamic_fields[i]->field_pos;
1765 			str_val = (char *)(long)vals[field_pos];
1766 			len = strlen(str_val) + 1;
1767 			data_size += len;
1768 		}
1769 	}
1770 
1771 	ret = __synth_event_trace_start(file, &state, data_size);
1772 	if (ret)
1773 		return ret;
1774 
1775 	if (n_vals != state.event->n_fields) {
1776 		ret = -EINVAL;
1777 		goto out;
1778 	}
1779 
1780 	data_size = 0;
1781 
1782 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1783 		if (state.event->fields[i]->is_string) {
1784 			char *str_val = (char *)(long)vals[i];
1785 
1786 			len = trace_string(state.entry, state.event, str_val,
1787 					   state.event->fields[i]->is_dynamic,
1788 					   data_size, &n_u64);
1789 			data_size += len; /* only dynamic string increments */
1790 		} else {
1791 			struct synth_field *field = state.event->fields[i];
1792 			u64 val = vals[i];
1793 
1794 			switch (field->size) {
1795 			case 1:
1796 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1797 				break;
1798 
1799 			case 2:
1800 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1801 				break;
1802 
1803 			case 4:
1804 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1805 				break;
1806 
1807 			default:
1808 				state.entry->fields[n_u64] = val;
1809 				break;
1810 			}
1811 			n_u64++;
1812 		}
1813 	}
1814 out:
1815 	__synth_event_trace_end(&state);
1816 
1817 	return ret;
1818 }
1819 EXPORT_SYMBOL_GPL(synth_event_trace_array);
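
/*
 * Example (illustrative only, continuing the hypothetical event above):
 *
 *	u64 vals[2] = { lat_ns, pid };
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */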
1820 
1821 /**
1822  * synth_event_trace_start - Start piecewise synthetic event trace
1823  * @file: The trace_event_file representing the synthetic event
1824  * @trace_state: A pointer to object tracking the piecewise trace state
1825  *
1826  * Start the trace of a synthetic event field-by-field rather than all
1827  * at once.
1828  *
1829  * This function 'opens' an event trace, which means space is reserved
1830  * for the event in the trace buffer, after which the event's
1831  * individual field values can be set through either
1832  * synth_event_add_next_val() or synth_event_add_val().
1833  *
1834  * A pointer to a trace_state object is passed in, which will keep
1835  * track of the current event trace state until the event trace is
1836  * closed (and the event finally traced) using
1837  * synth_event_trace_end().
1838  *
1839  * Note that synth_event_trace_end() must be called after all values
1840  * have been added for each event trace, regardless of whether adding
1841  * all field values succeeded or not.
1842  *
1843  * Note also that for a given event trace, all fields must be added
1844  * using either synth_event_add_next_val() or synth_event_add_val()
1845  * but not both together or interleaved.
1846  *
1847  * Return: 0 on success, err otherwise.
1848  */
1849 int synth_event_trace_start(struct trace_event_file *file,
1850 			    struct synth_event_trace_state *trace_state)
1851 {
1852 	int ret;
1853 
1854 	if (!trace_state)
1855 		return -EINVAL;
1856 
1857 	ret = __synth_event_trace_init(file, trace_state);
1858 	if (ret) {
1859 		if (ret == -ENOENT)
1860 			ret = 0; /* just disabled, not really an error */
1861 		return ret;
1862 	}
1863 
1864 	if (trace_state->event->n_dynamic_fields)
1865 		return -ENOTSUPP;
1866 
1867 	ret = __synth_event_trace_start(file, trace_state, 0);
1868 
1869 	return ret;
1870 }
1871 EXPORT_SYMBOL_GPL(synth_event_trace_start);
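
/*
 * Example of the piecewise interface (illustrative only; error handling
 * trimmed, fields as in the hypothetical event above):
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (!ret) {
 *		synth_event_add_next_val(lat_ns, &state);
 *		synth_event_add_next_val(pid, &state);
 *		ret = synth_event_trace_end(&state);
 *	}
 */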
1872 
1873 static int __synth_event_add_val(const char *field_name, u64 val,
1874 				 struct synth_event_trace_state *trace_state)
1875 {
1876 	struct synth_field *field = NULL;
1877 	struct synth_trace_event *entry;
1878 	struct synth_event *event;
1879 	int i, ret = 0;
1880 
1881 	if (!trace_state) {
1882 		ret = -EINVAL;
1883 		goto out;
1884 	}
1885 
1886 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1887 	if (field_name) {
1888 		if (trace_state->add_next) {
1889 			ret = -EINVAL;
1890 			goto out;
1891 		}
1892 		trace_state->add_name = true;
1893 	} else {
1894 		if (trace_state->add_name) {
1895 			ret = -EINVAL;
1896 			goto out;
1897 		}
1898 		trace_state->add_next = true;
1899 	}
1900 
1901 	if (trace_state->disabled)
1902 		goto out;
1903 
1904 	event = trace_state->event;
1905 	if (trace_state->add_name) {
1906 		for (i = 0; i < event->n_fields; i++) {
1907 			field = event->fields[i];
1908 			if (strcmp(field->name, field_name) == 0)
1909 				break;
1910 		}
1911 		if (!field) {
1912 			ret = -EINVAL;
1913 			goto out;
1914 		}
	} else {
		if (trace_state->cur_field >= event->n_fields) {
			ret = -EINVAL;
			goto out;
		}
		field = event->fields[trace_state->cur_field++];
	}

	entry = trace_state->entry;
	if (field->is_string) {
		char *str_val = (char *)(long)val;
		char *str_field;

		if (field->is_dynamic) { /* add_val can't do dynamic strings */
			ret = -EINVAL;
			goto out;
		}

		if (!str_val) {
			ret = -EINVAL;
			goto out;
		}

		str_field = (char *)&entry->fields[field->offset];
		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
	} else {
		switch (field->size) {
		case 1:
			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
			break;

		case 2:
			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
			break;

		case 4:
			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
			break;

		default:
			trace_state->entry->fields[field->offset] = val;
			break;
		}
	}
 out:
	return ret;
}

/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set.  If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
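
/*
 * Example (illustrative sketch, not compiled as part of this file): a
 * typical piecewise trace using synth_event_add_next_val().  The event
 * name "gen_synth_test", its field order and the literal values are
 * assumptions for illustration only; any synthetic event without
 * dynamic fields, reached through its trace_event_file, is traced the
 * same way.
 *
 *	struct synth_event_trace_state state;
 *	struct trace_event_file *file;
 *	int ret, end_ret;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "gen_synth_test");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret) {
 *		trace_put_event_file(file);
 *		return ret;
 *	}
 *
 *	ret = synth_event_add_next_val(777, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(1000000, &state);
 *
 *	end_ret = synth_event_trace_end(&state);
 *	trace_put_event_file(file);
 *	return ret ? ret : end_ret;
 */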

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value.  This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
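
/*
 * Example (illustrative sketch, not compiled as part of this file):
 * the same piecewise trace done by field name with
 * synth_event_add_val(), where the order of the calls doesn't matter.
 * The field names "ts_ns" and "pid" are assumptions for illustration;
 * "file", "state" and the other locals are set up as in the sketch
 * above.
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_val("ts_ns", 1000000, &state);
 *	if (!ret)
 *		ret = synth_event_add_val("pid", 777, &state);
 *
 *	end_ret = synth_event_trace_end(&state);
 *	return ret ? ret : end_ret;
 */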

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);
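
/*
 * For comparison (illustrative sketch, not compiled as part of this
 * file): when all values are known up front, the non-piecewise
 * synth_event_trace() and synth_event_trace_array(), defined earlier
 * in this file, avoid the start/add/end sequence entirely.  The three
 * values and their order are assumptions for illustration.
 *
 *	ret = synth_event_trace(file, 3, (u64)777, (u64)1000000, (u64)2000);
 *
 * or, with the values collected in an array:
 *
 *	u64 vals[] = { 777, 1000000, 2000 };
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */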

static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
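
/*
 * For reference, the creation syntax this parser accepts through the
 * tracefs dynamic_events file looks like the following (the event
 * name and field list here are just an example); the matching
 * '-:[synthetic/]<name>' deletion form is handled by the dyn_event
 * core before a command ever reaches this function:
 *
 *	# echo 's:latency u64 lat; pid_t pid' >> /sys/kernel/tracing/dynamic_events
 *	# echo 's:synthetic/latency u64 lat; pid_t pid' >> /sys/kernel/tracing/dynamic_events
 */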

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);