1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
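/*
 * The ERRORS list above is an X-macro: with C() defined as
 * SYNTH_ERR_##a it expands into the enum below, and with C() defined
 * as b it expands into the matching err_text[] strings.  As an
 * illustration, the expansion is roughly:
 *
 *	enum { SYNTH_ERR_BAD_NAME, SYNTH_ERR_INVALID_CMD, ... };
 *	static const char *err_text[] = { "Illegal name", ... };
 *
 * so a SYNTH_ERR_* value indexes the matching string when synth_err()
 * reports an error via tracing_log_err().
 */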
46 
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49 
50 static int errpos(const char *str)
51 {
52 	int ret = 0;
53 
54 	mutex_lock(&lastcmd_mutex);
55 	if (!str || !last_cmd)
56 		goto out;
57 
58 	ret = err_pos(last_cmd, str);
59  out:
60 	mutex_unlock(&lastcmd_mutex);
61 	return ret;
62 }
63 
64 static void last_cmd_set(const char *str)
65 {
66 	if (!str)
67 		return;
68 
69 	mutex_lock(&lastcmd_mutex);
70 	kfree(last_cmd);
71 	last_cmd = kstrdup(str, GFP_KERNEL);
72 	mutex_unlock(&lastcmd_mutex);
73 }
74 
75 static void synth_err(u8 err_type, u16 err_pos)
76 {
77 	mutex_lock(&lastcmd_mutex);
78 	if (!last_cmd)
79 		goto out;
80 
81 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
82 			err_type, err_pos);
83  out:
84 	mutex_unlock(&lastcmd_mutex);
85 }
86 
87 static int create_synth_event(const char *raw_command);
88 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
89 static int synth_event_release(struct dyn_event *ev);
90 static bool synth_event_is_busy(struct dyn_event *ev);
91 static bool synth_event_match(const char *system, const char *event,
92 			int argc, const char **argv, struct dyn_event *ev);
93 
94 static struct dyn_event_operations synth_event_ops = {
95 	.create = create_synth_event,
96 	.show = synth_event_show,
97 	.is_busy = synth_event_is_busy,
98 	.free = synth_event_release,
99 	.match = synth_event_match,
100 };
101 
102 static bool is_synth_event(struct dyn_event *ev)
103 {
104 	return ev->ops == &synth_event_ops;
105 }
106 
107 static struct synth_event *to_synth_event(struct dyn_event *ev)
108 {
109 	return container_of(ev, struct synth_event, devent);
110 }
111 
112 static bool synth_event_is_busy(struct dyn_event *ev)
113 {
114 	struct synth_event *event = to_synth_event(ev);
115 
116 	return event->ref != 0;
117 }
118 
119 static bool synth_event_match(const char *system, const char *event,
120 			int argc, const char **argv, struct dyn_event *ev)
121 {
122 	struct synth_event *sev = to_synth_event(ev);
123 
124 	return strcmp(sev->name, event) == 0 &&
125 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
126 }
127 
128 struct synth_trace_event {
129 	struct trace_entry	ent;
130 	union trace_synth_field	fields[];
131 };
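/*
 * A synthetic event record is laid out as the trace_entry header
 * followed by event->n_u64 64-bit slots, followed by any
 * variable-size data:
 *
 *	| trace_entry | fields[0] ... fields[n_u64 - 1] | dynamic data |
 *
 * Numeric fields and statically-sized strings live directly in the
 * slots (a static string spans STR_VAR_LEN_MAX bytes of them), while
 * a dynamic string or stacktrace uses a single slot whose as_dynamic
 * member records the offset/length of its payload in the dynamic
 * data area (see trace_string() and trace_stack() below).
 */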
132 
133 static int synth_event_define_fields(struct trace_event_call *call)
134 {
135 	struct synth_trace_event trace;
136 	int offset = offsetof(typeof(trace), fields);
137 	struct synth_event *event = call->data;
138 	unsigned int i, size, n_u64;
139 	char *name, *type;
140 	bool is_signed;
141 	int ret = 0;
142 
143 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
144 		size = event->fields[i]->size;
145 		is_signed = event->fields[i]->is_signed;
146 		type = event->fields[i]->type;
147 		name = event->fields[i]->name;
148 		ret = trace_define_field(call, type, name, offset, size,
149 					 is_signed, FILTER_OTHER);
150 		if (ret)
151 			break;
152 
153 		event->fields[i]->offset = n_u64;
154 
155 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
156 			offset += STR_VAR_LEN_MAX;
157 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
158 		} else {
159 			offset += sizeof(u64);
160 			n_u64++;
161 		}
162 	}
163 
164 	event->n_u64 = n_u64;
165 
166 	return ret;
167 }
168 
169 static bool synth_field_signed(char *type)
170 {
171 	if (str_has_prefix(type, "u"))
172 		return false;
173 	if (strcmp(type, "gfp_t") == 0)
174 		return false;
175 
176 	return true;
177 }
178 
179 static int synth_field_is_string(char *type)
180 {
181 	if (strstr(type, "char[") != NULL)
182 		return true;
183 
184 	return false;
185 }
186 
187 static int synth_field_is_stack(char *type)
188 {
189 	if (strstr(type, "long[") != NULL)
190 		return true;
191 
192 	return false;
193 }
194 
195 static int synth_field_string_size(char *type)
196 {
197 	char buf[4], *end, *start;
198 	unsigned int len;
199 	int size, err;
200 
201 	start = strstr(type, "char[");
202 	if (start == NULL)
203 		return -EINVAL;
204 	start += sizeof("char[") - 1;
205 
206 	end = strchr(type, ']');
207 	if (!end || end < start || type + strlen(type) > end + 1)
208 		return -EINVAL;
209 
210 	len = end - start;
211 	if (len > 3)
212 		return -EINVAL;
213 
214 	if (len == 0)
215 		return 0; /* variable-length string */
216 
217 	strncpy(buf, start, len);
218 	buf[len] = '\0';
219 
220 	err = kstrtouint(buf, 0, &size);
221 	if (err)
222 		return err;
223 
224 	if (size > STR_VAR_LEN_MAX)
225 		return -EINVAL;
226 
227 	return size;
228 }
229 
230 static int synth_field_size(char *type)
231 {
232 	int size = 0;
233 
234 	if (strcmp(type, "s64") == 0)
235 		size = sizeof(s64);
236 	else if (strcmp(type, "u64") == 0)
237 		size = sizeof(u64);
238 	else if (strcmp(type, "s32") == 0)
239 		size = sizeof(s32);
240 	else if (strcmp(type, "u32") == 0)
241 		size = sizeof(u32);
242 	else if (strcmp(type, "s16") == 0)
243 		size = sizeof(s16);
244 	else if (strcmp(type, "u16") == 0)
245 		size = sizeof(u16);
246 	else if (strcmp(type, "s8") == 0)
247 		size = sizeof(s8);
248 	else if (strcmp(type, "u8") == 0)
249 		size = sizeof(u8);
250 	else if (strcmp(type, "char") == 0)
251 		size = sizeof(char);
252 	else if (strcmp(type, "unsigned char") == 0)
253 		size = sizeof(unsigned char);
254 	else if (strcmp(type, "int") == 0)
255 		size = sizeof(int);
256 	else if (strcmp(type, "unsigned int") == 0)
257 		size = sizeof(unsigned int);
258 	else if (strcmp(type, "long") == 0)
259 		size = sizeof(long);
260 	else if (strcmp(type, "unsigned long") == 0)
261 		size = sizeof(unsigned long);
262 	else if (strcmp(type, "bool") == 0)
263 		size = sizeof(bool);
264 	else if (strcmp(type, "pid_t") == 0)
265 		size = sizeof(pid_t);
266 	else if (strcmp(type, "gfp_t") == 0)
267 		size = sizeof(gfp_t);
268 	else if (synth_field_is_string(type))
269 		size = synth_field_string_size(type);
270 	else if (synth_field_is_stack(type))
271 		size = 0;
272 
273 	return size;
274 }
275 
276 static const char *synth_field_fmt(char *type)
277 {
278 	const char *fmt = "%llu";
279 
280 	if (strcmp(type, "s64") == 0)
281 		fmt = "%lld";
282 	else if (strcmp(type, "u64") == 0)
283 		fmt = "%llu";
284 	else if (strcmp(type, "s32") == 0)
285 		fmt = "%d";
286 	else if (strcmp(type, "u32") == 0)
287 		fmt = "%u";
288 	else if (strcmp(type, "s16") == 0)
289 		fmt = "%d";
290 	else if (strcmp(type, "u16") == 0)
291 		fmt = "%u";
292 	else if (strcmp(type, "s8") == 0)
293 		fmt = "%d";
294 	else if (strcmp(type, "u8") == 0)
295 		fmt = "%u";
296 	else if (strcmp(type, "char") == 0)
297 		fmt = "%d";
298 	else if (strcmp(type, "unsigned char") == 0)
299 		fmt = "%u";
300 	else if (strcmp(type, "int") == 0)
301 		fmt = "%d";
302 	else if (strcmp(type, "unsigned int") == 0)
303 		fmt = "%u";
304 	else if (strcmp(type, "long") == 0)
305 		fmt = "%ld";
306 	else if (strcmp(type, "unsigned long") == 0)
307 		fmt = "%lu";
308 	else if (strcmp(type, "bool") == 0)
309 		fmt = "%d";
310 	else if (strcmp(type, "pid_t") == 0)
311 		fmt = "%d";
312 	else if (strcmp(type, "gfp_t") == 0)
313 		fmt = "%x";
314 	else if (synth_field_is_string(type))
315 		fmt = "%.*s";
316 	else if (synth_field_is_stack(type))
317 		fmt = "%s";
318 
319 	return fmt;
320 }
321 
322 static void print_synth_event_num_val(struct trace_seq *s,
323 				      char *print_fmt, char *name,
324 				      int size, union trace_synth_field *val, char *space)
325 {
326 	switch (size) {
327 	case 1:
328 		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
329 		break;
330 
331 	case 2:
332 		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
333 		break;
334 
335 	case 4:
336 		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
337 		break;
338 
339 	default:
340 		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
341 		break;
342 	}
343 }
344 
345 static enum print_line_t print_synth_event(struct trace_iterator *iter,
346 					   int flags,
347 					   struct trace_event *event)
348 {
349 	struct trace_array *tr = iter->tr;
350 	struct trace_seq *s = &iter->seq;
351 	struct synth_trace_event *entry;
352 	struct synth_event *se;
353 	unsigned int i, j, n_u64;
354 	char print_fmt[32];
355 	const char *fmt;
356 
357 	entry = (struct synth_trace_event *)iter->ent;
358 	se = container_of(event, struct synth_event, call.event);
359 
360 	trace_seq_printf(s, "%s: ", se->name);
361 
362 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
363 		if (trace_seq_has_overflowed(s))
364 			goto end;
365 
366 		fmt = synth_field_fmt(se->fields[i]->type);
367 
368 		/* parameter types */
369 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
370 			trace_seq_printf(s, "%s ", fmt);
371 
372 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
373 
374 		/* parameter values */
375 		if (se->fields[i]->is_string) {
376 			if (se->fields[i]->is_dynamic) {
377 				union trace_synth_field *data = &entry->fields[n_u64];
378 
379 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
380 						 STR_VAR_LEN_MAX,
381 						 (char *)entry + data->as_dynamic.offset,
382 						 i == se->n_fields - 1 ? "" : " ");
383 				n_u64++;
384 			} else {
385 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
386 						 STR_VAR_LEN_MAX,
387 						 (char *)&entry->fields[n_u64].as_u64,
388 						 i == se->n_fields - 1 ? "" : " ");
389 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
390 			}
391 		} else if (se->fields[i]->is_stack) {
392 			union trace_synth_field *data = &entry->fields[n_u64];
393 			unsigned long *p = (void *)entry + data->as_dynamic.offset;
394 
395 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
396 			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
397 				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
398 			n_u64++;
399 		} else {
400 			struct trace_print_flags __flags[] = {
401 			    __def_gfpflag_names, {-1, NULL} };
402 			char *space = (i == se->n_fields - 1 ? "" : " ");
403 
404 			print_synth_event_num_val(s, print_fmt,
405 						  se->fields[i]->name,
406 						  se->fields[i]->size,
407 						  &entry->fields[n_u64],
408 						  space);
409 
410 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
411 				trace_seq_puts(s, " (");
412 				trace_print_flags_seq(s, "|",
413 						      entry->fields[n_u64].as_u64,
414 						      __flags);
415 				trace_seq_putc(s, ')');
416 			}
417 			n_u64++;
418 		}
419 	}
420 end:
421 	trace_seq_putc(s, '\n');
422 
423 	return trace_handle_return(s);
424 }
425 
426 static struct trace_event_functions synth_event_funcs = {
427 	.trace		= print_synth_event
428 };
429 
430 static unsigned int trace_string(struct synth_trace_event *entry,
431 				 struct synth_event *event,
432 				 char *str_val,
433 				 bool is_dynamic,
434 				 unsigned int data_size,
435 				 unsigned int *n_u64)
436 {
437 	unsigned int len = 0;
438 	char *str_field;
439 	int ret;
440 
441 	if (is_dynamic) {
442 		union trace_synth_field *data = &entry->fields[*n_u64];
443 
		len = fetch_store_strlen((unsigned long)str_val);
		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
		data->as_dynamic.len = len;

		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
448 
449 		(*n_u64)++;
450 	} else {
451 		str_field = (char *)&entry->fields[*n_u64].as_u64;
452 
453 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
454 		if ((unsigned long)str_val < TASK_SIZE)
455 			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
456 		else
457 #endif
458 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
459 
460 		if (ret < 0)
461 			strcpy(str_field, FAULT_STRING);
462 
463 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
464 	}
465 
466 	return len;
467 }
468 
469 static unsigned int trace_stack(struct synth_trace_event *entry,
470 				 struct synth_event *event,
471 				 long *stack,
472 				 unsigned int data_size,
473 				 unsigned int *n_u64)
474 {
475 	union trace_synth_field *data = &entry->fields[*n_u64];
476 	unsigned int len;
477 	u32 data_offset;
478 	void *data_loc;
479 
480 	data_offset = struct_size(entry, fields, event->n_u64);
481 	data_offset += data_size;
482 
483 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
484 		if (!stack[len])
485 			break;
486 	}
487 
488 	len *= sizeof(long);
489 
490 	/* Find the dynamic section to copy the stack into. */
491 	data_loc = (void *)entry + data_offset;
492 	memcpy(data_loc, stack, len);
493 
494 	/* Fill in the field that holds the offset/len combo */
495 
496 	data->as_dynamic.offset = data_offset;
497 	data->as_dynamic.len = len;
498 
499 	(*n_u64)++;
500 
501 	return len;
502 }
503 
504 static notrace void trace_event_raw_event_synth(void *__data,
505 						u64 *var_ref_vals,
506 						unsigned int *var_ref_idx)
507 {
508 	unsigned int i, n_u64, val_idx, len, data_size = 0;
509 	struct trace_event_file *trace_file = __data;
510 	struct synth_trace_event *entry;
511 	struct trace_event_buffer fbuffer;
512 	struct trace_buffer *buffer;
513 	struct synth_event *event;
514 	int fields_size = 0;
515 
516 	event = trace_file->event_call->data;
517 
518 	if (trace_trigger_soft_disabled(trace_file))
519 		return;
520 
521 	fields_size = event->n_u64 * sizeof(u64);
522 
523 	for (i = 0; i < event->n_dynamic_fields; i++) {
524 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
525 		char *str_val;
526 
527 		val_idx = var_ref_idx[field_pos];
528 		str_val = (char *)(long)var_ref_vals[val_idx];
529 
530 		if (event->dynamic_fields[i]->is_stack) {
531 			/* reserve one extra element for size */
532 			len = *((unsigned long *)str_val) + 1;
533 			len *= sizeof(unsigned long);
534 		} else {
535 			len = fetch_store_strlen((unsigned long)str_val);
536 		}
537 
538 		fields_size += len;
539 	}
540 
541 	/*
542 	 * Avoid ring buffer recursion detection, as this event
543 	 * is being performed within another event.
544 	 */
545 	buffer = trace_file->tr->array_buffer.buffer;
546 	ring_buffer_nest_start(buffer);
547 
548 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
549 					   sizeof(*entry) + fields_size);
550 	if (!entry)
551 		goto out;
552 
553 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
554 		val_idx = var_ref_idx[i];
555 		if (event->fields[i]->is_string) {
556 			char *str_val = (char *)(long)var_ref_vals[val_idx];
557 
558 			len = trace_string(entry, event, str_val,
559 					   event->fields[i]->is_dynamic,
560 					   data_size, &n_u64);
561 			data_size += len; /* only dynamic string increments */
562 		} else if (event->fields[i]->is_stack) {
563 			long *stack = (long *)(long)var_ref_vals[val_idx];
564 
565 			len = trace_stack(entry, event, stack,
566 					   data_size, &n_u64);
567 			data_size += len;
568 		} else {
569 			struct synth_field *field = event->fields[i];
570 			u64 val = var_ref_vals[val_idx];
571 
572 			switch (field->size) {
573 			case 1:
574 				entry->fields[n_u64].as_u8 = (u8)val;
575 				break;
576 
577 			case 2:
578 				entry->fields[n_u64].as_u16 = (u16)val;
579 				break;
580 
581 			case 4:
582 				entry->fields[n_u64].as_u32 = (u32)val;
583 				break;
584 
585 			default:
586 				entry->fields[n_u64].as_u64 = val;
587 				break;
588 			}
589 			n_u64++;
590 		}
591 	}
592 
593 	trace_event_buffer_commit(&fbuffer);
594 out:
595 	ring_buffer_nest_end(buffer);
596 }
597 
598 static void free_synth_event_print_fmt(struct trace_event_call *call)
599 {
600 	if (call) {
601 		kfree(call->print_fmt);
602 		call->print_fmt = NULL;
603 	}
604 }
605 
606 static int __set_synth_event_print_fmt(struct synth_event *event,
607 				       char *buf, int len)
608 {
609 	const char *fmt;
610 	int pos = 0;
611 	int i;
612 
613 	/* When len=0, we just calculate the needed length */
614 #define LEN_OR_ZERO (len ? len - pos : 0)
615 
616 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
617 	for (i = 0; i < event->n_fields; i++) {
618 		fmt = synth_field_fmt(event->fields[i]->type);
619 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
620 				event->fields[i]->name, fmt,
621 				i == event->n_fields - 1 ? "" : ", ");
622 	}
623 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
624 
625 	for (i = 0; i < event->n_fields; i++) {
626 		if (event->fields[i]->is_string &&
627 		    event->fields[i]->is_dynamic)
628 			pos += snprintf(buf + pos, LEN_OR_ZERO,
629 				", __get_str(%s)", event->fields[i]->name);
630 		else if (event->fields[i]->is_stack)
631 			pos += snprintf(buf + pos, LEN_OR_ZERO,
632 				", __get_stacktrace(%s)", event->fields[i]->name);
633 		else
634 			pos += snprintf(buf + pos, LEN_OR_ZERO,
635 					", REC->%s", event->fields[i]->name);
636 	}
637 
638 #undef LEN_OR_ZERO
639 
640 	/* return the length of print_fmt */
641 	return pos;
642 }
643 
644 static int set_synth_event_print_fmt(struct trace_event_call *call)
645 {
646 	struct synth_event *event = call->data;
647 	char *print_fmt;
648 	int len;
649 
650 	/* First: called with 0 length to calculate the needed length */
651 	len = __set_synth_event_print_fmt(event, NULL, 0);
652 
653 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
654 	if (!print_fmt)
655 		return -ENOMEM;
656 
657 	/* Second: actually write the @print_fmt */
658 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
659 	call->print_fmt = print_fmt;
660 
661 	return 0;
662 }
663 
664 static void free_synth_field(struct synth_field *field)
665 {
666 	kfree(field->type);
667 	kfree(field->name);
668 	kfree(field);
669 }
670 
671 static int check_field_version(const char *prefix, const char *field_type,
672 			       const char *field_name)
673 {
674 	/*
675 	 * For backward compatibility, the old synthetic event command
676 	 * format did not require semicolons, and in order to not
677 	 * break user space, that old format must still work. If a new
678 	 * feature is added, then the format that uses the new feature
679 	 * will be required to have semicolons, as nothing that uses
680 	 * the old format would be using the new, yet to be created,
681 	 * feature. When a new feature is added, this will detect it,
682 	 * and return a number greater than 1, and require the format
683 	 * to use semicolons.
684 	 */
685 	return 1;
686 }
687 
688 static struct synth_field *parse_synth_field(int argc, char **argv,
689 					     int *consumed, int *field_version)
690 {
691 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
692 	struct synth_field *field;
693 	int len, ret = -ENOMEM;
694 	struct seq_buf s;
695 	ssize_t size;
696 
697 	if (!strcmp(field_type, "unsigned")) {
698 		if (argc < 3) {
699 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
700 			return ERR_PTR(-EINVAL);
701 		}
702 		prefix = "unsigned ";
703 		field_type = argv[1];
704 		field_name = argv[2];
705 		*consumed += 3;
706 	} else {
707 		field_name = argv[1];
708 		*consumed += 2;
709 	}
710 
711 	if (!field_name) {
712 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
713 		return ERR_PTR(-EINVAL);
714 	}
715 
716 	*field_version = check_field_version(prefix, field_type, field_name);
717 
718 	field = kzalloc(sizeof(*field), GFP_KERNEL);
719 	if (!field)
720 		return ERR_PTR(-ENOMEM);
721 
722 	len = strlen(field_name);
723 	array = strchr(field_name, '[');
724 	if (array)
725 		len -= strlen(array);
726 
727 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
728 	if (!field->name)
729 		goto free;
730 
731 	if (!is_good_name(field->name)) {
732 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
733 		ret = -EINVAL;
734 		goto free;
735 	}
736 
737 	len = strlen(field_type) + 1;
738 
739 	if (array)
740 		len += strlen(array);
741 
742 	if (prefix)
743 		len += strlen(prefix);
744 
745 	field->type = kzalloc(len, GFP_KERNEL);
746 	if (!field->type)
747 		goto free;
748 
749 	seq_buf_init(&s, field->type, len);
750 	if (prefix)
751 		seq_buf_puts(&s, prefix);
752 	seq_buf_puts(&s, field_type);
753 	if (array)
754 		seq_buf_puts(&s, array);
755 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
756 		goto free;
757 
758 	s.buffer[s.len] = '\0';
759 
760 	size = synth_field_size(field->type);
761 	if (size < 0) {
762 		if (array)
763 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
764 		else
765 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
766 		ret = -EINVAL;
767 		goto free;
768 	} else if (size == 0) {
769 		if (synth_field_is_string(field->type) ||
770 		    synth_field_is_stack(field->type)) {
771 			char *type;
772 
773 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
774 			type = kzalloc(len, GFP_KERNEL);
775 			if (!type)
776 				goto free;
777 
778 			seq_buf_init(&s, type, len);
779 			seq_buf_puts(&s, "__data_loc ");
780 			seq_buf_puts(&s, field->type);
781 
782 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
783 				goto free;
784 			s.buffer[s.len] = '\0';
785 
786 			kfree(field->type);
787 			field->type = type;
788 
789 			field->is_dynamic = true;
790 			size = sizeof(u64);
791 		} else {
792 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
793 			ret = -EINVAL;
794 			goto free;
795 		}
796 	}
797 	field->size = size;
798 
799 	if (synth_field_is_string(field->type))
800 		field->is_string = true;
801 	else if (synth_field_is_stack(field->type))
802 		field->is_stack = true;
803 
804 	field->is_signed = synth_field_signed(field->type);
805  out:
806 	return field;
807  free:
808 	free_synth_field(field);
809 	field = ERR_PTR(ret);
810 	goto out;
811 }
812 
813 static void free_synth_tracepoint(struct tracepoint *tp)
814 {
815 	if (!tp)
816 		return;
817 
818 	kfree(tp->name);
819 	kfree(tp);
820 }
821 
822 static struct tracepoint *alloc_synth_tracepoint(char *name)
823 {
824 	struct tracepoint *tp;
825 
826 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
827 	if (!tp)
828 		return ERR_PTR(-ENOMEM);
829 
830 	tp->name = kstrdup(name, GFP_KERNEL);
831 	if (!tp->name) {
832 		kfree(tp);
833 		return ERR_PTR(-ENOMEM);
834 	}
835 
836 	return tp;
837 }
838 
839 struct synth_event *find_synth_event(const char *name)
840 {
841 	struct dyn_event *pos;
842 	struct synth_event *event;
843 
844 	for_each_dyn_event(pos) {
845 		if (!is_synth_event(pos))
846 			continue;
847 		event = to_synth_event(pos);
848 		if (strcmp(event->name, name) == 0)
849 			return event;
850 	}
851 
852 	return NULL;
853 }
854 
855 static struct trace_event_fields synth_event_fields_array[] = {
856 	{ .type = TRACE_FUNCTION_TYPE,
857 	  .define_fields = synth_event_define_fields },
858 	{}
859 };
860 
861 static int register_synth_event(struct synth_event *event)
862 {
863 	struct trace_event_call *call = &event->call;
864 	int ret = 0;
865 
866 	event->call.class = &event->class;
867 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
868 	if (!event->class.system) {
869 		ret = -ENOMEM;
870 		goto out;
871 	}
872 
873 	event->tp = alloc_synth_tracepoint(event->name);
874 	if (IS_ERR(event->tp)) {
875 		ret = PTR_ERR(event->tp);
876 		event->tp = NULL;
877 		goto out;
878 	}
879 
880 	INIT_LIST_HEAD(&call->class->fields);
881 	call->event.funcs = &synth_event_funcs;
882 	call->class->fields_array = synth_event_fields_array;
883 
884 	ret = register_trace_event(&call->event);
885 	if (!ret) {
886 		ret = -ENODEV;
887 		goto out;
888 	}
889 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
890 	call->class->reg = trace_event_reg;
891 	call->class->probe = trace_event_raw_event_synth;
892 	call->data = event;
893 	call->tp = event->tp;
894 
895 	ret = trace_add_event_call(call);
896 	if (ret) {
897 		pr_warn("Failed to register synthetic event: %s\n",
898 			trace_event_name(call));
899 		goto err;
900 	}
901 
902 	ret = set_synth_event_print_fmt(call);
903 	/* unregister_trace_event() will be called inside */
904 	if (ret < 0)
905 		trace_remove_event_call(call);
906  out:
907 	return ret;
908  err:
909 	unregister_trace_event(&call->event);
910 	goto out;
911 }
912 
913 static int unregister_synth_event(struct synth_event *event)
914 {
915 	struct trace_event_call *call = &event->call;
916 	int ret;
917 
918 	ret = trace_remove_event_call(call);
919 
920 	return ret;
921 }
922 
923 static void free_synth_event(struct synth_event *event)
924 {
925 	unsigned int i;
926 
927 	if (!event)
928 		return;
929 
930 	for (i = 0; i < event->n_fields; i++)
931 		free_synth_field(event->fields[i]);
932 
933 	kfree(event->fields);
934 	kfree(event->dynamic_fields);
935 	kfree(event->name);
936 	kfree(event->class.system);
937 	free_synth_tracepoint(event->tp);
938 	free_synth_event_print_fmt(&event->call);
939 	kfree(event);
940 }
941 
942 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
943 					     struct synth_field **fields)
944 {
945 	unsigned int i, j, n_dynamic_fields = 0;
946 	struct synth_event *event;
947 
948 	event = kzalloc(sizeof(*event), GFP_KERNEL);
949 	if (!event) {
950 		event = ERR_PTR(-ENOMEM);
951 		goto out;
952 	}
953 
954 	event->name = kstrdup(name, GFP_KERNEL);
955 	if (!event->name) {
956 		kfree(event);
957 		event = ERR_PTR(-ENOMEM);
958 		goto out;
959 	}
960 
961 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
962 	if (!event->fields) {
963 		free_synth_event(event);
964 		event = ERR_PTR(-ENOMEM);
965 		goto out;
966 	}
967 
968 	for (i = 0; i < n_fields; i++)
969 		if (fields[i]->is_dynamic)
970 			n_dynamic_fields++;
971 
972 	if (n_dynamic_fields) {
973 		event->dynamic_fields = kcalloc(n_dynamic_fields,
974 						sizeof(*event->dynamic_fields),
975 						GFP_KERNEL);
976 		if (!event->dynamic_fields) {
977 			free_synth_event(event);
978 			event = ERR_PTR(-ENOMEM);
979 			goto out;
980 		}
981 	}
982 
983 	dyn_event_init(&event->devent, &synth_event_ops);
984 
985 	for (i = 0, j = 0; i < n_fields; i++) {
986 		fields[i]->field_pos = i;
987 		event->fields[i] = fields[i];
988 
989 		if (fields[i]->is_dynamic)
990 			event->dynamic_fields[j++] = fields[i];
991 	}
992 	event->n_dynamic_fields = j;
993 	event->n_fields = n_fields;
994  out:
995 	return event;
996 }
997 
998 static int synth_event_check_arg_fn(void *data)
999 {
1000 	struct dynevent_arg_pair *arg_pair = data;
1001 	int size;
1002 
1003 	size = synth_field_size((char *)arg_pair->lhs);
1004 	if (size == 0) {
1005 		if (strstr((char *)arg_pair->lhs, "["))
1006 			return 0;
1007 	}
1008 
1009 	return size ? 0 : -EINVAL;
1010 }
1011 
1012 /**
1013  * synth_event_add_field - Add a new field to a synthetic event cmd
1014  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1015  * @type: The type of the new field to add
1016  * @name: The name of the new field to add
1017  *
1018  * Add a new field to a synthetic event cmd object.  Field ordering is in
1019  * the same order the fields are added.
1020  *
1021  * See synth_field_size() for available types. If field_name contains
1022  * [n] the field is considered to be an array.
1023  *
1024  * Return: 0 if successful, error otherwise.
1025  */
1026 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1027 			  const char *name)
1028 {
1029 	struct dynevent_arg_pair arg_pair;
1030 	int ret;
1031 
1032 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1033 		return -EINVAL;
1034 
1035 	if (!type || !name)
1036 		return -EINVAL;
1037 
1038 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1039 
1040 	arg_pair.lhs = type;
1041 	arg_pair.rhs = name;
1042 
1043 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1044 	if (ret)
1045 		return ret;
1046 
1047 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1048 		ret = -EINVAL;
1049 
1050 	return ret;
1051 }
1052 EXPORT_SYMBOL_GPL(synth_event_add_field);
1053 
1054 /**
1055  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1056  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1057  * @type_name: The type and name of the new field to add, as a single string
1058  *
1059  * Add a new field to a synthetic event cmd object, as a single
1060  * string.  The @type_name string is expected to be of the form 'type
 * name'; a ';' separator is appended to it automatically.  No sanity checking is done -
1062  * what's passed in is assumed to already be well-formed.  Field
1063  * ordering is in the same order the fields are added.
1064  *
1065  * See synth_field_size() for available types. If field_name contains
1066  * [n] the field is considered to be an array.
1067  *
1068  * Return: 0 if successful, error otherwise.
1069  */
1070 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1071 {
1072 	struct dynevent_arg arg;
1073 	int ret;
1074 
1075 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1076 		return -EINVAL;
1077 
1078 	if (!type_name)
1079 		return -EINVAL;
1080 
1081 	dynevent_arg_init(&arg, ';');
1082 
1083 	arg.str = type_name;
1084 
1085 	ret = dynevent_arg_add(cmd, &arg, NULL);
1086 	if (ret)
1087 		return ret;
1088 
1089 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1090 		ret = -EINVAL;
1091 
1092 	return ret;
1093 }
1094 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1095 
1096 /**
1097  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1098  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1099  * @fields: An array of type/name field descriptions
1100  * @n_fields: The number of field descriptions contained in the fields array
1101  *
1102  * Add a new set of fields to a synthetic event cmd object.  The event
1103  * fields that will be defined for the event should be passed in as an
1104  * array of struct synth_field_desc, and the number of elements in the
1105  * array passed in as n_fields.  Field ordering will retain the
1106  * ordering given in the fields array.
1107  *
1108  * See synth_field_size() for available types. If field_name contains
1109  * [n] the field is considered to be an array.
1110  *
1111  * Return: 0 if successful, error otherwise.
1112  */
1113 int synth_event_add_fields(struct dynevent_cmd *cmd,
1114 			   struct synth_field_desc *fields,
1115 			   unsigned int n_fields)
1116 {
1117 	unsigned int i;
1118 	int ret = 0;
1119 
1120 	for (i = 0; i < n_fields; i++) {
1121 		if (fields[i].type == NULL || fields[i].name == NULL) {
1122 			ret = -EINVAL;
1123 			break;
1124 		}
1125 
1126 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1127 		if (ret)
1128 			break;
1129 	}
1130 
1131 	return ret;
1132 }
1133 EXPORT_SYMBOL_GPL(synth_event_add_fields);
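/*
 * As an illustrative sketch (the event and field names here are
 * hypothetical), a caller building a command started with one of the
 * synth_event_gen_cmd_*_start() variants could describe its fields
 * once and add them in bulk:
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat" },
 *	};
 *
 *	ret = synth_event_add_fields(cmd, wakeup_fields,
 *				     ARRAY_SIZE(wakeup_fields));
 */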
1134 
1135 /**
1136  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1137  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1138  * @name: The name of the synthetic event
1139  * @mod: The module creating the event, NULL if not created from a module
1140  * @args: Variable number of arg (pairs), one pair for each field
1141  *
1142  * NOTE: Users normally won't want to call this function directly, but
1143  * rather use the synth_event_gen_cmd_start() wrapper, which
1144  * automatically adds a NULL to the end of the arg list.  If this
1145  * function is used directly, make sure the last arg in the variable
1146  * arg list is NULL.
1147  *
1148  * Generate a synthetic event command to be executed by
1149  * synth_event_gen_cmd_end().  This function can be used to generate
1150  * the complete command or only the first part of it; in the latter
1151  * case, synth_event_add_field(), synth_event_add_field_str(), or
1152  * synth_event_add_fields() can be used to add more fields following
1153  * this.
1154  *
 * There should be an even number of variable args, each pair consisting
1156  * of a type followed by a field name.
1157  *
1158  * See synth_field_size() for available types. If field_name contains
1159  * [n] the field is considered to be an array.
1160  *
1161  * Return: 0 if successful, error otherwise.
1162  */
1163 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1164 				struct module *mod, ...)
1165 {
1166 	struct dynevent_arg arg;
1167 	va_list args;
1168 	int ret;
1169 
1170 	cmd->event_name = name;
1171 	cmd->private_data = mod;
1172 
1173 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1174 		return -EINVAL;
1175 
1176 	dynevent_arg_init(&arg, 0);
1177 	arg.str = name;
1178 	ret = dynevent_arg_add(cmd, &arg, NULL);
1179 	if (ret)
1180 		return ret;
1181 
1182 	va_start(args, mod);
1183 	for (;;) {
1184 		const char *type, *name;
1185 
1186 		type = va_arg(args, const char *);
1187 		if (!type)
1188 			break;
1189 		name = va_arg(args, const char *);
1190 		if (!name)
1191 			break;
1192 
1193 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1194 			ret = -EINVAL;
1195 			break;
1196 		}
1197 
1198 		ret = synth_event_add_field(cmd, type, name);
1199 		if (ret)
1200 			break;
1201 	}
1202 	va_end(args);
1203 
1204 	return ret;
1205 }
1206 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
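/*
 * A minimal usage sketch (hypothetical event/field names), similar in
 * spirit to the in-tree synth_event_gen_test sample module: build the
 * command incrementally, then execute it with synth_event_gen_cmd_end():
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "wakeup_latency", THIS_MODULE,
 *					"pid_t", "pid",
 *					"u64", "lat");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "char[16]", "comm");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */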
1207 
1208 /**
1209  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1210  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1211  * @name: The name of the synthetic event
1212  * @mod: The module creating the event, NULL if not created from a module
1213  * @fields: An array of type/name field descriptions
1214  * @n_fields: The number of field descriptions contained in the fields array
1215  *
1216  * Generate a synthetic event command to be executed by
1217  * synth_event_gen_cmd_end().  This function can be used to generate
1218  * the complete command or only the first part of it; in the latter
1219  * case, synth_event_add_field(), synth_event_add_field_str(), or
1220  * synth_event_add_fields() can be used to add more fields following
1221  * this.
1222  *
1223  * The event fields that will be defined for the event should be
1224  * passed in as an array of struct synth_field_desc, and the number of
1225  * elements in the array passed in as n_fields.  Field ordering will
1226  * retain the ordering given in the fields array.
1227  *
1228  * See synth_field_size() for available types. If field_name contains
1229  * [n] the field is considered to be an array.
1230  *
1231  * Return: 0 if successful, error otherwise.
1232  */
1233 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1234 				    struct module *mod,
1235 				    struct synth_field_desc *fields,
1236 				    unsigned int n_fields)
1237 {
1238 	struct dynevent_arg arg;
1239 	unsigned int i;
1240 	int ret = 0;
1241 
1242 	cmd->event_name = name;
1243 	cmd->private_data = mod;
1244 
1245 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1246 		return -EINVAL;
1247 
1248 	if (n_fields > SYNTH_FIELDS_MAX)
1249 		return -EINVAL;
1250 
1251 	dynevent_arg_init(&arg, 0);
1252 	arg.str = name;
1253 	ret = dynevent_arg_add(cmd, &arg, NULL);
1254 	if (ret)
1255 		return ret;
1256 
1257 	for (i = 0; i < n_fields; i++) {
1258 		if (fields[i].type == NULL || fields[i].name == NULL)
1259 			return -EINVAL;
1260 
1261 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1262 		if (ret)
1263 			break;
1264 	}
1265 
1266 	return ret;
1267 }
1268 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
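/*
 * Array-based counterpart of the sketch above (hypothetical names):
 * the same command can be started from a synth_field_desc array
 * instead of variable type/name pairs:
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "wakeup_latency",
 *					      THIS_MODULE, wakeup_fields,
 *					      ARRAY_SIZE(wakeup_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */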
1269 
1270 static int __create_synth_event(const char *name, const char *raw_fields)
1271 {
1272 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1273 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1274 	int consumed, cmd_version = 1, n_fields_this_loop;
1275 	int i, argc, n_fields = 0, ret = 0;
1276 	struct synth_event *event = NULL;
1277 
1278 	/*
1279 	 * Argument syntax:
1280 	 *  - Add synthetic event: <event_name> field[;field] ...
1281 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1282 	 *      where 'field' = type field_name
1283 	 */
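	/*
	 * For example, from user space (assuming tracefs is mounted at
	 * /sys/kernel/tracing, and using a purely illustrative event):
	 *
	 *   # echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' \
	 *           >> /sys/kernel/tracing/synthetic_events
	 *   # echo '!wakeup_latency' >> /sys/kernel/tracing/synthetic_events
	 */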
1284 
1285 	if (name[0] == '\0') {
1286 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1287 		return -EINVAL;
1288 	}
1289 
1290 	if (!is_good_name(name)) {
1291 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1292 		return -EINVAL;
1293 	}
1294 
1295 	mutex_lock(&event_mutex);
1296 
1297 	event = find_synth_event(name);
1298 	if (event) {
1299 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1300 		ret = -EEXIST;
1301 		goto err;
1302 	}
1303 
1304 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1305 	if (!tmp_fields) {
1306 		ret = -ENOMEM;
1307 		goto err;
1308 	}
1309 
1310 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1311 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1312 		if (!argv) {
1313 			ret = -ENOMEM;
1314 			goto err;
1315 		}
1316 
1317 		if (!argc) {
1318 			argv_free(argv);
1319 			continue;
1320 		}
1321 
1322 		n_fields_this_loop = 0;
1323 		consumed = 0;
1324 		while (argc > consumed) {
1325 			int field_version;
1326 
1327 			field = parse_synth_field(argc - consumed,
1328 						  argv + consumed, &consumed,
1329 						  &field_version);
1330 			if (IS_ERR(field)) {
1331 				ret = PTR_ERR(field);
1332 				goto err_free_arg;
1333 			}
1334 
1335 			/*
1336 			 * Track the highest version of any field we
1337 			 * found in the command.
1338 			 */
1339 			if (field_version > cmd_version)
1340 				cmd_version = field_version;
1341 
1342 			/*
1343 			 * Now sort out what is and isn't valid for
1344 			 * each supported version.
1345 			 *
1346 			 * If we see more than 1 field per loop, it
1347 			 * means we have multiple fields between
1348 			 * semicolons, and that's something we no
1349 			 * longer support in a version 2 or greater
1350 			 * command.
1351 			 */
1352 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1353 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1354 				ret = -EINVAL;
1355 				goto err_free_arg;
1356 			}
1357 
1358 			if (n_fields == SYNTH_FIELDS_MAX) {
1359 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1360 				ret = -EINVAL;
1361 				goto err_free_arg;
1362 			}
1363 			fields[n_fields++] = field;
1364 
1365 			n_fields_this_loop++;
1366 		}
1367 		argv_free(argv);
1368 
1369 		if (consumed < argc) {
1370 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1371 			ret = -EINVAL;
1372 			goto err;
1373 		}
1374 
1375 	}
1376 
1377 	if (n_fields == 0) {
1378 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1379 		ret = -EINVAL;
1380 		goto err;
1381 	}
1382 
1383 	event = alloc_synth_event(name, n_fields, fields);
1384 	if (IS_ERR(event)) {
1385 		ret = PTR_ERR(event);
1386 		event = NULL;
1387 		goto err;
1388 	}
1389 	ret = register_synth_event(event);
1390 	if (!ret)
1391 		dyn_event_add(&event->devent, &event->call);
1392 	else
1393 		free_synth_event(event);
1394  out:
1395 	mutex_unlock(&event_mutex);
1396 
1397 	kfree(saved_fields);
1398 
1399 	return ret;
1400  err_free_arg:
1401 	argv_free(argv);
1402  err:
1403 	for (i = 0; i < n_fields; i++)
1404 		free_synth_field(fields[i]);
1405 
1406 	goto out;
1407 }
1408 
1409 /**
1410  * synth_event_create - Create a new synthetic event
1411  * @name: The name of the new synthetic event
1412  * @fields: An array of type/name field descriptions
1413  * @n_fields: The number of field descriptions contained in the fields array
1414  * @mod: The module creating the event, NULL if not created from a module
1415  *
1416  * Create a new synthetic event with the given name under the
1417  * trace/events/synthetic/ directory.  The event fields that will be
1418  * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
1420  * n_fields. Field ordering will retain the ordering given in the
1421  * fields array.
1422  *
1423  * If the new synthetic event is being created from a module, the mod
1424  * param must be non-NULL.  This will ensure that the trace buffer
1425  * won't contain unreadable events.
1426  *
 * The new synth event should be deleted using the synth_event_delete()
 * function.  The new synthetic event can be generated from modules or
 * other kernel code using synth_event_trace() and related functions.
1430  *
1431  * Return: 0 if successful, error otherwise.
1432  */
1433 int synth_event_create(const char *name, struct synth_field_desc *fields,
1434 		       unsigned int n_fields, struct module *mod)
1435 {
1436 	struct dynevent_cmd cmd;
1437 	char *buf;
1438 	int ret;
1439 
1440 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1441 	if (!buf)
1442 		return -ENOMEM;
1443 
1444 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1445 
1446 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1447 					      fields, n_fields);
1448 	if (ret)
1449 		goto out;
1450 
1451 	ret = synth_event_gen_cmd_end(&cmd);
1452  out:
1453 	kfree(buf);
1454 
1455 	return ret;
1456 }
1457 EXPORT_SYMBOL_GPL(synth_event_create);
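/*
 * Simplest one-shot usage sketch (hypothetical names): describe the
 * fields and let synth_event_create() build and execute the command
 * internally:
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat" },
 *		{ .type = "char[16]",	.name = "comm" },
 *	};
 *
 *	ret = synth_event_create("wakeup_latency", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 */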
1458 
1459 static int destroy_synth_event(struct synth_event *se)
1460 {
1461 	int ret;
1462 
1463 	if (se->ref)
1464 		return -EBUSY;
1465 
1466 	if (trace_event_dyn_busy(&se->call))
1467 		return -EBUSY;
1468 
1469 	ret = unregister_synth_event(se);
1470 	if (!ret) {
1471 		dyn_event_remove(&se->devent);
1472 		free_synth_event(se);
1473 	}
1474 
1475 	return ret;
1476 }
1477 
1478 /**
1479  * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
1481  *
1482  * Delete a synthetic event that was created with synth_event_create().
1483  *
1484  * Return: 0 if successful, error otherwise.
1485  */
1486 int synth_event_delete(const char *event_name)
1487 {
1488 	struct synth_event *se = NULL;
1489 	struct module *mod = NULL;
1490 	int ret = -ENOENT;
1491 
1492 	mutex_lock(&event_mutex);
1493 	se = find_synth_event(event_name);
1494 	if (se) {
1495 		mod = se->mod;
1496 		ret = destroy_synth_event(se);
1497 	}
1498 	mutex_unlock(&event_mutex);
1499 
1500 	if (mod) {
1501 		/*
1502 		 * It is safest to reset the ring buffer if the module
1503 		 * being unloaded registered any events that were
1504 		 * used. The only worry is if a new module gets
1505 		 * loaded, and takes on the same id as the events of
1506 		 * this module. When printing out the buffer, traced
1507 		 * events left over from this module may be passed to
1508 		 * the new module events and unexpected results may
1509 		 * occur.
1510 		 */
1511 		tracing_reset_all_online_cpus();
1512 	}
1513 
1514 	return ret;
1515 }
1516 EXPORT_SYMBOL_GPL(synth_event_delete);
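/*
 * The matching teardown for the sketch above would simply be (again
 * with a hypothetical event name):
 *
 *	ret = synth_event_delete("wakeup_latency");
 */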
1517 
1518 static int check_command(const char *raw_command)
1519 {
1520 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1521 	int argc, ret = 0;
1522 
1523 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1524 	if (!cmd)
1525 		return -ENOMEM;
1526 
1527 	name_and_field = strsep(&cmd, ";");
1528 	if (!name_and_field) {
1529 		ret = -EINVAL;
1530 		goto free;
1531 	}
1532 
1533 	if (name_and_field[0] == '!')
1534 		goto free;
1535 
1536 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1537 	if (!argv) {
1538 		ret = -ENOMEM;
1539 		goto free;
1540 	}
1541 	argv_free(argv);
1542 
1543 	if (argc < 3)
1544 		ret = -EINVAL;
1545 free:
1546 	kfree(saved_cmd);
1547 
1548 	return ret;
1549 }
1550 
1551 static int create_or_delete_synth_event(const char *raw_command)
1552 {
1553 	char *name = NULL, *fields, *p;
1554 	int ret = 0;
1555 
1556 	raw_command = skip_spaces(raw_command);
1557 	if (raw_command[0] == '\0')
1558 		return ret;
1559 
1560 	last_cmd_set(raw_command);
1561 
1562 	ret = check_command(raw_command);
1563 	if (ret) {
1564 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1565 		return ret;
1566 	}
1567 
1568 	p = strpbrk(raw_command, " \t");
1569 	if (!p && raw_command[0] != '!') {
1570 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1571 		ret = -EINVAL;
1572 		goto free;
1573 	}
1574 
1575 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1576 	if (!name)
1577 		return -ENOMEM;
1578 
1579 	if (name[0] == '!') {
1580 		ret = synth_event_delete(name + 1);
1581 		goto free;
1582 	}
1583 
1584 	fields = skip_spaces(p);
1585 
1586 	ret = __create_synth_event(name, fields);
1587 free:
1588 	kfree(name);
1589 
1590 	return ret;
1591 }
1592 
1593 static int synth_event_run_command(struct dynevent_cmd *cmd)
1594 {
1595 	struct synth_event *se;
1596 	int ret;
1597 
1598 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1599 	if (ret)
1600 		return ret;
1601 
1602 	se = find_synth_event(cmd->event_name);
1603 	if (WARN_ON(!se))
1604 		return -ENOENT;
1605 
1606 	se->mod = cmd->private_data;
1607 
1608 	return ret;
1609 }
1610 
1611 /**
1612  * synth_event_cmd_init - Initialize a synthetic event command object
1613  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1614  * @buf: A pointer to the buffer used to build the command
1615  * @maxlen: The length of the buffer passed in @buf
1616  *
1617  * Initialize a synthetic event command object.  Use this before
 * calling any of the other dynevent_cmd functions.
1619  */
1620 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1621 {
1622 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1623 			  synth_event_run_command);
1624 }
1625 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1626 
1627 static inline int
1628 __synth_event_trace_init(struct trace_event_file *file,
1629 			 struct synth_event_trace_state *trace_state)
1630 {
1631 	int ret = 0;
1632 
1633 	memset(trace_state, '\0', sizeof(*trace_state));
1634 
1635 	/*
1636 	 * Normal event tracing doesn't get called at all unless the
1637 	 * ENABLED bit is set (which attaches the probe thus allowing
1638 	 * this code to be called, etc).  Because this is called
1639 	 * directly by the user, we don't have that but we still need
1640 	 * to honor not logging when disabled.  For the iterated
1641 	 * trace case, we save the enabled state upon start and just
1642 	 * ignore the following data calls.
1643 	 */
1644 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1645 	    trace_trigger_soft_disabled(file)) {
1646 		trace_state->disabled = true;
1647 		ret = -ENOENT;
1648 		goto out;
1649 	}
1650 
1651 	trace_state->event = file->event_call->data;
1652 out:
1653 	return ret;
1654 }
1655 
1656 static inline int
1657 __synth_event_trace_start(struct trace_event_file *file,
1658 			  struct synth_event_trace_state *trace_state,
1659 			  int dynamic_fields_size)
1660 {
1661 	int entry_size, fields_size = 0;
1662 	int ret = 0;
1663 
1664 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1665 	fields_size += dynamic_fields_size;
1666 
1667 	/*
1668 	 * Avoid ring buffer recursion detection, as this event
1669 	 * is being performed within another event.
1670 	 */
1671 	trace_state->buffer = file->tr->array_buffer.buffer;
1672 	ring_buffer_nest_start(trace_state->buffer);
1673 
1674 	entry_size = sizeof(*trace_state->entry) + fields_size;
1675 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1676 							file,
1677 							entry_size);
1678 	if (!trace_state->entry) {
1679 		ring_buffer_nest_end(trace_state->buffer);
1680 		ret = -EINVAL;
1681 	}
1682 
1683 	return ret;
1684 }
1685 
1686 static inline void
1687 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1688 {
1689 	trace_event_buffer_commit(&trace_state->fbuffer);
1690 
1691 	ring_buffer_nest_end(trace_state->buffer);
1692 }
1693 
1694 /**
1695  * synth_event_trace - Trace a synthetic event
1696  * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values passed in the variable argument list
1698  * @args: Variable number of args containing the event values
1699  *
1700  * Trace a synthetic event using the values passed in the variable
1701  * argument list.
1702  *
 * The argument list should be a list of 'n_vals' u64 values.  The number
 * of vals must match the number of fields in the synthetic event, and
1705  * must be in the same order as the synthetic event fields.
1706  *
1707  * All vals should be cast to u64, and string vals are just pointers
1708  * to strings, cast to u64.  Strings will be copied into space
1709  * reserved in the event for the string, using these pointers.
1710  *
1711  * Return: 0 on success, err otherwise.
1712  */
1713 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1714 {
1715 	unsigned int i, n_u64, len, data_size = 0;
1716 	struct synth_event_trace_state state;
1717 	va_list args;
1718 	int ret;
1719 
1720 	ret = __synth_event_trace_init(file, &state);
1721 	if (ret) {
1722 		if (ret == -ENOENT)
1723 			ret = 0; /* just disabled, not really an error */
1724 		return ret;
1725 	}
1726 
1727 	if (state.event->n_dynamic_fields) {
1728 		va_start(args, n_vals);
1729 
1730 		for (i = 0; i < state.event->n_fields; i++) {
1731 			u64 val = va_arg(args, u64);
1732 
1733 			if (state.event->fields[i]->is_string &&
1734 			    state.event->fields[i]->is_dynamic) {
1735 				char *str_val = (char *)(long)val;
1736 
1737 				data_size += strlen(str_val) + 1;
1738 			}
1739 		}
1740 
1741 		va_end(args);
1742 	}
1743 
1744 	ret = __synth_event_trace_start(file, &state, data_size);
1745 	if (ret)
1746 		return ret;
1747 
1748 	if (n_vals != state.event->n_fields) {
1749 		ret = -EINVAL;
1750 		goto out;
1751 	}
1752 
1753 	data_size = 0;
1754 
1755 	va_start(args, n_vals);
1756 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1757 		u64 val;
1758 
1759 		val = va_arg(args, u64);
1760 
1761 		if (state.event->fields[i]->is_string) {
1762 			char *str_val = (char *)(long)val;
1763 
1764 			len = trace_string(state.entry, state.event, str_val,
1765 					   state.event->fields[i]->is_dynamic,
1766 					   data_size, &n_u64);
1767 			data_size += len; /* only dynamic string increments */
1768 		} else {
1769 			struct synth_field *field = state.event->fields[i];
1770 
1771 			switch (field->size) {
1772 			case 1:
1773 				state.entry->fields[n_u64].as_u8 = (u8)val;
1774 				break;
1775 
1776 			case 2:
1777 				state.entry->fields[n_u64].as_u16 = (u16)val;
1778 				break;
1779 
1780 			case 4:
1781 				state.entry->fields[n_u64].as_u32 = (u32)val;
1782 				break;
1783 
1784 			default:
1785 				state.entry->fields[n_u64].as_u64 = val;
1786 				break;
1787 			}
1788 			n_u64++;
1789 		}
1790 	}
1791 	va_end(args);
1792 out:
1793 	__synth_event_trace_end(&state);
1794 
1795 	return ret;
1796 }
1797 EXPORT_SYMBOL_GPL(synth_event_trace);
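/*
 * Usage sketch (hypothetical names): the trace_event_file is normally
 * looked up once with trace_get_event_file(), and the values are then
 * passed in field order - here pid, lat, comm - with string values
 * passed as pointers cast to u64:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "wakeup_latency");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)current->pid,
 *				(u64)1000,
 *				(u64)(long)"cat");
 *
 *	trace_put_event_file(file);
 */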
1798 
1799 /**
1800  * synth_event_trace_array - Trace a synthetic event from an array
1801  * @file: The trace_event_file representing the synthetic event
1802  * @vals: Array of values
1803  * @n_vals: The number of values in vals
1804  *
1805  * Trace a synthetic event using the values passed in as 'vals'.
1806  *
 * The 'vals' array is just an array of 'n_vals' u64 values.  The number of
 * vals must match the number of fields in the synthetic event, and
1809  * must be in the same order as the synthetic event fields.
1810  *
1811  * All vals should be cast to u64, and string vals are just pointers
1812  * to strings, cast to u64.  Strings will be copied into space
1813  * reserved in the event for the string, using these pointers.
1814  *
1815  * Return: 0 on success, err otherwise.
1816  */
1817 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1818 			    unsigned int n_vals)
1819 {
1820 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1821 	struct synth_event_trace_state state;
1822 	char *str_val;
1823 	int ret;
1824 
1825 	ret = __synth_event_trace_init(file, &state);
1826 	if (ret) {
1827 		if (ret == -ENOENT)
1828 			ret = 0; /* just disabled, not really an error */
1829 		return ret;
1830 	}
1831 
1832 	if (state.event->n_dynamic_fields) {
1833 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1834 			field_pos = state.event->dynamic_fields[i]->field_pos;
1835 			str_val = (char *)(long)vals[field_pos];
1836 			len = strlen(str_val) + 1;
1837 			data_size += len;
1838 		}
1839 	}
1840 
1841 	ret = __synth_event_trace_start(file, &state, data_size);
1842 	if (ret)
1843 		return ret;
1844 
1845 	if (n_vals != state.event->n_fields) {
1846 		ret = -EINVAL;
1847 		goto out;
1848 	}
1849 
1850 	data_size = 0;
1851 
1852 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1853 		if (state.event->fields[i]->is_string) {
1854 			char *str_val = (char *)(long)vals[i];
1855 
1856 			len = trace_string(state.entry, state.event, str_val,
1857 					   state.event->fields[i]->is_dynamic,
1858 					   data_size, &n_u64);
1859 			data_size += len; /* only dynamic string increments */
1860 		} else {
1861 			struct synth_field *field = state.event->fields[i];
1862 			u64 val = vals[i];
1863 
1864 			switch (field->size) {
1865 			case 1:
1866 				state.entry->fields[n_u64].as_u8 = (u8)val;
1867 				break;
1868 
1869 			case 2:
1870 				state.entry->fields[n_u64].as_u16 = (u16)val;
1871 				break;
1872 
1873 			case 4:
1874 				state.entry->fields[n_u64].as_u32 = (u32)val;
1875 				break;
1876 
1877 			default:
1878 				state.entry->fields[n_u64].as_u64 = val;
1879 				break;
1880 			}
1881 			n_u64++;
1882 		}
1883 	}
1884 out:
1885 	__synth_event_trace_end(&state);
1886 
1887 	return ret;
1888 }
1889 EXPORT_SYMBOL_GPL(synth_event_trace_array);
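/*
 * Equivalent array form of the sketch above (hypothetical names):
 *
 *	u64 vals[3];
 *
 *	vals[0] = current->pid;
 *	vals[1] = 1000;
 *	vals[2] = (u64)(long)"cat";
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */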
1890 
1891 /**
1892  * synth_event_trace_start - Start piecewise synthetic event trace
1893  * @file: The trace_event_file representing the synthetic event
1894  * @trace_state: A pointer to object tracking the piecewise trace state
1895  *
1896  * Start the trace of a synthetic event field-by-field rather than all
1897  * at once.
1898  *
1899  * This function 'opens' an event trace, which means space is reserved
1900  * for the event in the trace buffer, after which the event's
1901  * individual field values can be set through either
1902  * synth_event_add_next_val() or synth_event_add_val().
1903  *
1904  * A pointer to a trace_state object is passed in, which will keep
1905  * track of the current event trace state until the event trace is
1906  * closed (and the event finally traced) using
1907  * synth_event_trace_end().
1908  *
1909  * Note that synth_event_trace_end() must be called after all values
1910  * have been added for each event trace, regardless of whether adding
1911  * all field values succeeded or not.
1912  *
1913  * Note also that for a given event trace, all fields must be added
1914  * using either synth_event_add_next_val() or synth_event_add_val()
1915  * but not both together or interleaved.
1916  *
1917  * Return: 0 on success, err otherwise.
1918  */
1919 int synth_event_trace_start(struct trace_event_file *file,
1920 			    struct synth_event_trace_state *trace_state)
1921 {
1922 	int ret;
1923 
1924 	if (!trace_state)
1925 		return -EINVAL;
1926 
1927 	ret = __synth_event_trace_init(file, trace_state);
1928 	if (ret) {
1929 		if (ret == -ENOENT)
1930 			ret = 0; /* just disabled, not really an error */
1931 		return ret;
1932 	}
1933 
1934 	if (trace_state->event->n_dynamic_fields)
1935 		return -ENOTSUPP;
1936 
1937 	ret = __synth_event_trace_start(file, trace_state, 0);
1938 
1939 	return ret;
1940 }
1941 EXPORT_SYMBOL_GPL(synth_event_trace_start);
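/*
 * Piecewise usage sketch (hypothetical names): open the trace, add
 * the values one by one, and always close it with
 * synth_event_trace_end(), even if adding a value failed:
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(1000, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val((u64)(long)"cat", &state);
 *
 *	synth_event_trace_end(&state);
 */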
1942 
1943 static int __synth_event_add_val(const char *field_name, u64 val,
1944 				 struct synth_event_trace_state *trace_state)
1945 {
1946 	struct synth_field *field = NULL;
1947 	struct synth_trace_event *entry;
1948 	struct synth_event *event;
1949 	int i, ret = 0;
1950 
1951 	if (!trace_state) {
1952 		ret = -EINVAL;
1953 		goto out;
1954 	}
1955 
	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
	if (field_name) {
		if (trace_state->add_next) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_name = true;
	} else {
		if (trace_state->add_name) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_next = true;
	}

	if (trace_state->disabled)
		goto out;

	event = trace_state->event;
	if (trace_state->add_name) {
		for (i = 0; i < event->n_fields; i++) {
			if (strcmp(event->fields[i]->name, field_name) == 0) {
				field = event->fields[i];
				break;
			}
		}
		if (!field) { /* no field with that name */
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (trace_state->cur_field >= event->n_fields) {
			ret = -EINVAL;
			goto out;
		}
		field = event->fields[trace_state->cur_field++];
	}

	entry = trace_state->entry;
	if (field->is_string) {
		char *str_val = (char *)(long)val;
		char *str_field;

		if (field->is_dynamic) { /* add_val can't do dynamic strings */
			ret = -EINVAL;
			goto out;
		}

		if (!str_val) {
			ret = -EINVAL;
			goto out;
		}

		str_field = (char *)&entry->fields[field->offset];
		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
	} else {
		switch (field->size) {
		case 1:
			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
			break;

		case 2:
			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
			break;

		case 4:
			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
			break;

		default:
			trace_state->entry->fields[field->offset].as_u64 = val;
			break;
		}
	}
 out:
	return ret;
}

/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to an object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set.  If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
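/*
 * Usage sketch (illustrative only, same hypothetical event
 * "wake_lat pid_t pid; u64 delta; char comm[16]" as above): filling an
 * opened trace in field-definition order with synth_event_add_next_val().
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(delta, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val((u64)(long)current->comm, &state);
 *
 *	synth_event_trace_end(&state);	(must be called even on add failure)
 */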

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to an object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value.  This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
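/*
 * Usage sketch (illustrative only, same hypothetical event as above):
 * setting fields by name, in any order.  A (static) string value is passed
 * as its pointer cast to u64.
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_val("delta", delta, &state);
 *	if (!ret)
 *		ret = synth_event_add_val("pid", current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_val("comm", (u64)(long)current->comm,
 *					  &state);
 *
 *	synth_event_trace_end(&state);
 */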

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to an object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace: it commits the reserved
 * event and cleans up any other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts a group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
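/*
 * For illustration: via the dynamic_events interface this callback handles
 * commands of the form "s:[synthetic/]<name> field[;field] ...", e.g. (the
 * event name and fields here are hypothetical):
 *
 *	s:synthetic/wake_lat pid_t pid; u64 delta; char comm[16]
 *
 * The same definition written to the synthetic_events file omits the
 * leading "s:" (and optional "synthetic/" group) prefix.
 */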

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* field type and name */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}
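/*
 * For illustration (hypothetical event): the synthetic_events file lists an
 * event as
 *
 *	wake_lat	pid_t pid; u64 delta; char comm[16]
 *
 * while the dynamic_events view produced here additionally prefixes the
 * group, i.e. "s:synthetic/wake_lat ...".
 */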

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open		= synth_events_open,
	.write		= synth_events_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * synthetic events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);