1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/mutex.h>
11 #include <linux/slab.h>
12 #include <linux/stacktrace.h>
13 #include <linux/rculist.h>
14 #include <linux/tracefs.h>
15 
16 #include "tracing_map.h"
17 #include "trace.h"
18 #include "trace_dynevent.h"
19 
20 #define SYNTH_SYSTEM		"synthetic"
21 #define SYNTH_FIELDS_MAX	16
22 
23 #define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
24 
25 struct hist_field;
26 
27 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
28 				struct tracing_map_elt *elt,
29 				struct ring_buffer_event *rbe,
30 				void *event);
31 
32 #define HIST_FIELD_OPERANDS_MAX	2
33 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
34 #define HIST_ACTIONS_MAX	8
35 
36 enum field_op_id {
37 	FIELD_OP_NONE,
38 	FIELD_OP_PLUS,
39 	FIELD_OP_MINUS,
40 	FIELD_OP_UNARY_MINUS,
41 };
42 
43 /*
44  * A hist_var (histogram variable) contains variable information for
45  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
46  * flag set.  A hist_var has a variable name, e.g. ts0, and is
47  * associated with a given histogram trigger, as specified by
48  * hist_data.  The hist_var idx is the unique index assigned to the
49  * variable by the hist trigger's tracing_map.  The idx is used
50  * both to set the variable's value and, via a variable reference,
51  * to retrieve it.
52  */
53 struct hist_var {
54 	char				*name;
55 	struct hist_trigger_data	*hist_data;
56 	unsigned int			idx;
57 };
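
/*
 * Illustrative example (not part of the original source; see
 * Documentation/trace/histogram.rst): a variable such as ts0 is
 * created by an assignment in one hist trigger and referenced with a
 * '$' prefix from another, e.g.:
 *
 *   # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *       events/sched/sched_waking/trigger
 *   # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> \
 *       events/sched/sched_switch/trigger
 *
 * The first trigger defines a hist_var named "ts0"; the second
 * resolves $ts0 through the idx saved for it in the owning trigger's
 * tracing_map.
 */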
58 
59 struct hist_field {
60 	struct ftrace_event_field	*field;
61 	unsigned long			flags;
62 	hist_field_fn_t			fn;
63 	unsigned int			size;
64 	unsigned int			offset;
65 	unsigned int			is_signed;
66 	const char			*type;
67 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
68 	struct hist_trigger_data	*hist_data;
69 
70 	/*
71 	 * Variable fields contain variable-specific info in var.
72 	 */
73 	struct hist_var			var;
74 	enum field_op_id		operator;
75 	char				*system;
76 	char				*event_name;
77 
78 	/*
79 	 * The name field is used for EXPR and VAR_REF fields.  VAR
80 	 * fields contain the variable name in var.name.
81 	 */
82 	char				*name;
83 
84 	/*
85 	 * When a histogram trigger is hit, if it has any references
86 	 * to variables, the values of those variables are collected
87 	 * into a var_ref_vals array by resolve_var_refs().  The
88 	 * current value of each variable is read from the tracing_map
89 	 * using the hist field's hist_var.idx and stored in the entry
90 	 * at this field's var_ref_idx, i.e. var_ref_vals[var_ref_idx].
91 	 */
92 	unsigned int			var_ref_idx;
93 	bool				read_once;
94 };
95 
96 static u64 hist_field_none(struct hist_field *field,
97 			   struct tracing_map_elt *elt,
98 			   struct ring_buffer_event *rbe,
99 			   void *event)
100 {
101 	return 0;
102 }
103 
104 static u64 hist_field_counter(struct hist_field *field,
105 			      struct tracing_map_elt *elt,
106 			      struct ring_buffer_event *rbe,
107 			      void *event)
108 {
109 	return 1;
110 }
111 
112 static u64 hist_field_string(struct hist_field *hist_field,
113 			     struct tracing_map_elt *elt,
114 			     struct ring_buffer_event *rbe,
115 			     void *event)
116 {
117 	char *addr = (char *)(event + hist_field->field->offset);
118 
119 	return (u64)(unsigned long)addr;
120 }
121 
122 static u64 hist_field_dynstring(struct hist_field *hist_field,
123 				struct tracing_map_elt *elt,
124 				struct ring_buffer_event *rbe,
125 				void *event)
126 {
127 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
128 	int str_loc = str_item & 0xffff;
129 	char *addr = (char *)(event + str_loc);
130 
131 	return (u64)(unsigned long)addr;
132 }
133 
134 static u64 hist_field_pstring(struct hist_field *hist_field,
135 			      struct tracing_map_elt *elt,
136 			      struct ring_buffer_event *rbe,
137 			      void *event)
138 {
139 	char **addr = (char **)(event + hist_field->field->offset);
140 
141 	return (u64)(unsigned long)*addr;
142 }
143 
144 static u64 hist_field_log2(struct hist_field *hist_field,
145 			   struct tracing_map_elt *elt,
146 			   struct ring_buffer_event *rbe,
147 			   void *event)
148 {
149 	struct hist_field *operand = hist_field->operands[0];
150 
151 	u64 val = operand->fn(operand, elt, rbe, event);
152 
153 	return (u64) ilog2(roundup_pow_of_two(val));
154 }
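
/*
 * Worked example for hist_field_log2() above (illustrative): a value
 * of 1500 is rounded up to 2048, so the function returns 11 and a
 * ".log2"-modified key groups the event into the 2^11 bucket.
 */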
155 
156 static u64 hist_field_plus(struct hist_field *hist_field,
157 			   struct tracing_map_elt *elt,
158 			   struct ring_buffer_event *rbe,
159 			   void *event)
160 {
161 	struct hist_field *operand1 = hist_field->operands[0];
162 	struct hist_field *operand2 = hist_field->operands[1];
163 
164 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
165 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
166 
167 	return val1 + val2;
168 }
169 
170 static u64 hist_field_minus(struct hist_field *hist_field,
171 			    struct tracing_map_elt *elt,
172 			    struct ring_buffer_event *rbe,
173 			    void *event)
174 {
175 	struct hist_field *operand1 = hist_field->operands[0];
176 	struct hist_field *operand2 = hist_field->operands[1];
177 
178 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
179 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
180 
181 	return val1 - val2;
182 }
183 
184 static u64 hist_field_unary_minus(struct hist_field *hist_field,
185 				  struct tracing_map_elt *elt,
186 				  struct ring_buffer_event *rbe,
187 				  void *event)
188 {
189 	struct hist_field *operand = hist_field->operands[0];
190 
191 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
192 	u64 val = (u64)-sval;
193 
194 	return val;
195 }
196 
197 #define DEFINE_HIST_FIELD_FN(type)					\
198 	static u64 hist_field_##type(struct hist_field *hist_field,	\
199 				     struct tracing_map_elt *elt,	\
200 				     struct ring_buffer_event *rbe,	\
201 				     void *event)			\
202 {									\
203 	type *addr = (type *)(event + hist_field->field->offset);	\
204 									\
205 	return (u64)(unsigned long)*addr;				\
206 }
207 
208 DEFINE_HIST_FIELD_FN(s64);
209 DEFINE_HIST_FIELD_FN(u64);
210 DEFINE_HIST_FIELD_FN(s32);
211 DEFINE_HIST_FIELD_FN(u32);
212 DEFINE_HIST_FIELD_FN(s16);
213 DEFINE_HIST_FIELD_FN(u16);
214 DEFINE_HIST_FIELD_FN(s8);
215 DEFINE_HIST_FIELD_FN(u8);
216 
217 #define for_each_hist_field(i, hist_data)	\
218 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
219 
220 #define for_each_hist_val_field(i, hist_data)	\
221 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
222 
223 #define for_each_hist_key_field(i, hist_data)	\
224 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
225 
226 #define HIST_STACKTRACE_DEPTH	16
227 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
228 #define HIST_STACKTRACE_SKIP	5
229 
230 #define HITCOUNT_IDX		0
231 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
232 
233 enum hist_field_flags {
234 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
235 	HIST_FIELD_FL_KEY		= 1 << 1,
236 	HIST_FIELD_FL_STRING		= 1 << 2,
237 	HIST_FIELD_FL_HEX		= 1 << 3,
238 	HIST_FIELD_FL_SYM		= 1 << 4,
239 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
240 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
241 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
242 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
243 	HIST_FIELD_FL_LOG2		= 1 << 9,
244 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
245 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
246 	HIST_FIELD_FL_VAR		= 1 << 12,
247 	HIST_FIELD_FL_EXPR		= 1 << 13,
248 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
249 	HIST_FIELD_FL_CPU		= 1 << 15,
250 	HIST_FIELD_FL_ALIAS		= 1 << 16,
251 };
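
/*
 * Rough correspondence between the flags above and the trigger syntax
 * (illustrative, not exhaustive): a spec such as
 *
 *   keys=call_site.sym,bytes_req.log2:vals=bytes_alloc
 *
 * sets HIST_FIELD_FL_SYM on call_site and HIST_FIELD_FL_LOG2 on
 * bytes_req; the always-present hitcount value carries
 * HIST_FIELD_FL_HITCOUNT, and "common_timestamp.usecs" sets both
 * HIST_FIELD_FL_TIMESTAMP and HIST_FIELD_FL_TIMESTAMP_USECS.
 */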
252 
253 struct var_defs {
254 	unsigned int	n_vars;
255 	char		*name[TRACING_MAP_VARS_MAX];
256 	char		*expr[TRACING_MAP_VARS_MAX];
257 };
258 
259 struct hist_trigger_attrs {
260 	char		*keys_str;
261 	char		*vals_str;
262 	char		*sort_key_str;
263 	char		*name;
264 	char		*clock;
265 	bool		pause;
266 	bool		cont;
267 	bool		clear;
268 	bool		ts_in_usecs;
269 	unsigned int	map_bits;
270 
271 	char		*assignment_str[TRACING_MAP_VARS_MAX];
272 	unsigned int	n_assignments;
273 
274 	char		*action_str[HIST_ACTIONS_MAX];
275 	unsigned int	n_actions;
276 
277 	struct var_defs	var_defs;
278 };
279 
280 struct field_var {
281 	struct hist_field	*var;
282 	struct hist_field	*val;
283 };
284 
285 struct field_var_hist {
286 	struct hist_trigger_data	*hist_data;
287 	char				*cmd;
288 };
289 
290 struct hist_trigger_data {
291 	struct hist_field               *fields[HIST_FIELDS_MAX];
292 	unsigned int			n_vals;
293 	unsigned int			n_keys;
294 	unsigned int			n_fields;
295 	unsigned int			n_vars;
296 	unsigned int			key_size;
297 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
298 	unsigned int			n_sort_keys;
299 	struct trace_event_file		*event_file;
300 	struct hist_trigger_attrs	*attrs;
301 	struct tracing_map		*map;
302 	bool				enable_timestamps;
303 	bool				remove;
304 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
305 	unsigned int			n_var_refs;
306 
307 	struct action_data		*actions[HIST_ACTIONS_MAX];
308 	unsigned int			n_actions;
309 
310 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
311 	unsigned int			n_field_vars;
312 	unsigned int			n_field_var_str;
313 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
314 	unsigned int			n_field_var_hists;
315 
316 	struct field_var		*max_vars[SYNTH_FIELDS_MAX];
317 	unsigned int			n_max_vars;
318 	unsigned int			n_max_var_str;
319 };
320 
321 static int synth_event_create(int argc, const char **argv);
322 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
323 static int synth_event_release(struct dyn_event *ev);
324 static bool synth_event_is_busy(struct dyn_event *ev);
325 static bool synth_event_match(const char *system, const char *event,
326 			      struct dyn_event *ev);
327 
328 static struct dyn_event_operations synth_event_ops = {
329 	.create = synth_event_create,
330 	.show = synth_event_show,
331 	.is_busy = synth_event_is_busy,
332 	.free = synth_event_release,
333 	.match = synth_event_match,
334 };
335 
336 struct synth_field {
337 	char *type;
338 	char *name;
339 	size_t size;
340 	bool is_signed;
341 	bool is_string;
342 };
343 
344 struct synth_event {
345 	struct dyn_event			devent;
346 	int					ref;
347 	char					*name;
348 	struct synth_field			**fields;
349 	unsigned int				n_fields;
350 	unsigned int				n_u64;
351 	struct trace_event_class		class;
352 	struct trace_event_call			call;
353 	struct tracepoint			*tp;
354 };
355 
356 static bool is_synth_event(struct dyn_event *ev)
357 {
358 	return ev->ops == &synth_event_ops;
359 }
360 
361 static struct synth_event *to_synth_event(struct dyn_event *ev)
362 {
363 	return container_of(ev, struct synth_event, devent);
364 }
365 
366 static bool synth_event_is_busy(struct dyn_event *ev)
367 {
368 	struct synth_event *event = to_synth_event(ev);
369 
370 	return event->ref != 0;
371 }
372 
373 static bool synth_event_match(const char *system, const char *event,
374 			      struct dyn_event *ev)
375 {
376 	struct synth_event *sev = to_synth_event(ev);
377 
378 	return strcmp(sev->name, event) == 0 &&
379 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
380 }
381 
382 struct action_data;
383 
384 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
385 			     struct tracing_map_elt *elt, void *rec,
386 			     struct ring_buffer_event *rbe,
387 			     struct action_data *data, u64 *var_ref_vals);
388 
389 struct action_data {
390 	action_fn_t		fn;
391 	unsigned int		n_params;
392 	char			*params[SYNTH_FIELDS_MAX];
393 
394 	union {
395 		struct {
396 			/*
397 			 * When a histogram trigger is hit, the values of any
398 			 * references to variables, including variables being passed
399 			 * as parameters to synthetic events, are collected into a
400 			 * var_ref_vals array.  This var_ref_idx is the index of the
401 			 * first param in the array to be passed to the synthetic
402 			 * event invocation.
403 			 */
404 			unsigned int		var_ref_idx;
405 			char			*match_event;
406 			char			*match_event_system;
407 			char			*synth_event_name;
408 			struct synth_event	*synth_event;
409 		} onmatch;
410 
411 		struct {
412 			char			*var_str;
413 			char			*fn_name;
414 			unsigned int		max_var_ref_idx;
415 			struct hist_field	*max_var;
416 			struct hist_field	*var;
417 		} onmax;
418 	};
419 };
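
/*
 * Illustrative action strings as they appear in a trigger definition
 * (see Documentation/trace/histogram.rst); the first parses into the
 * onmatch variant above, the second into onmax:
 *
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)
 *   onmax($wakeup_lat).save(next_comm,prev_pid,prev_comm)
 *
 * For onmatch, "sched" becomes match_event_system, "sched_waking"
 * match_event, and "wakeup_latency" names a previously created
 * synthetic event.
 */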
420 
421 
422 static char last_hist_cmd[MAX_FILTER_STR_VAL];
423 static char hist_err_str[MAX_FILTER_STR_VAL];
424 
425 static void last_cmd_set(char *str)
426 {
427 	if (!str)
428 		return;
429 
430 	strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1);
431 }
432 
433 static void hist_err(char *str, char *var)
434 {
435 	int maxlen = MAX_FILTER_STR_VAL - 1;
436 
437 	if (!str)
438 		return;
439 
440 	if (strlen(hist_err_str))
441 		return;
442 
443 	if (!var)
444 		var = "";
445 
446 	if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen)
447 		return;
448 
449 	strcat(hist_err_str, str);
450 	strcat(hist_err_str, var);
451 }
452 
453 static void hist_err_event(char *str, char *system, char *event, char *var)
454 {
455 	char err[MAX_FILTER_STR_VAL];
456 
457 	if (system && var)
458 		snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var);
459 	else if (system)
460 		snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
461 	else
462 		strscpy(err, var, MAX_FILTER_STR_VAL);
463 
464 	hist_err(str, err);
465 }
466 
467 static void hist_err_clear(void)
468 {
469 	hist_err_str[0] = '\0';
470 }
471 
472 static bool have_hist_err(void)
473 {
474 	if (strlen(hist_err_str))
475 		return true;
476 
477 	return false;
478 }
479 
480 struct synth_trace_event {
481 	struct trace_entry	ent;
482 	u64			fields[];
483 };
484 
485 static int synth_event_define_fields(struct trace_event_call *call)
486 {
487 	struct synth_trace_event trace;
488 	int offset = offsetof(typeof(trace), fields);
489 	struct synth_event *event = call->data;
490 	unsigned int i, size, n_u64;
491 	char *name, *type;
492 	bool is_signed;
493 	int ret = 0;
494 
495 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
496 		size = event->fields[i]->size;
497 		is_signed = event->fields[i]->is_signed;
498 		type = event->fields[i]->type;
499 		name = event->fields[i]->name;
500 		ret = trace_define_field(call, type, name, offset, size,
501 					 is_signed, FILTER_OTHER);
502 		if (ret)
503 			break;
504 
505 		if (event->fields[i]->is_string) {
506 			offset += STR_VAR_LEN_MAX;
507 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
508 		} else {
509 			offset += sizeof(u64);
510 			n_u64++;
511 		}
512 	}
513 
514 	event->n_u64 = n_u64;
515 
516 	return ret;
517 }
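
/*
 * Layout note (illustrative): each non-string synthetic event field
 * occupies one u64 slot in synth_trace_event.fields[], while a string
 * field occupies STR_VAR_LEN_MAX / sizeof(u64) slots (4 with the
 * current 32-byte maximum).  For "u64 lat; char comm[16]; pid_t pid"
 * this gives n_u64 = 1 + 4 + 1 = 6.
 */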
518 
519 static bool synth_field_signed(char *type)
520 {
521 	if (str_has_prefix(type, "u"))
522 		return false;
523 
524 	return true;
525 }
526 
527 static int synth_field_is_string(char *type)
528 {
529 	if (strstr(type, "char[") != NULL)
530 		return true;
531 
532 	return false;
533 }
534 
535 static int synth_field_string_size(char *type)
536 {
537 	char buf[4], *end, *start;
538 	unsigned int len;
539 	int size, err;
540 
541 	start = strstr(type, "char[");
542 	if (start == NULL)
543 		return -EINVAL;
544 	start += sizeof("char[") - 1;
545 
546 	end = strchr(type, ']');
547 	if (!end || end < start)
548 		return -EINVAL;
549 
550 	len = end - start;
551 	if (len > 3)
552 		return -EINVAL;
553 
554 	strncpy(buf, start, len);
555 	buf[len] = '\0';
556 
557 	err = kstrtouint(buf, 0, &size);
558 	if (err)
559 		return err;
560 
561 	if (size > STR_VAR_LEN_MAX)
562 		return -EINVAL;
563 
564 	return size;
565 }
566 
567 static int synth_field_size(char *type)
568 {
569 	int size = 0;
570 
571 	if (strcmp(type, "s64") == 0)
572 		size = sizeof(s64);
573 	else if (strcmp(type, "u64") == 0)
574 		size = sizeof(u64);
575 	else if (strcmp(type, "s32") == 0)
576 		size = sizeof(s32);
577 	else if (strcmp(type, "u32") == 0)
578 		size = sizeof(u32);
579 	else if (strcmp(type, "s16") == 0)
580 		size = sizeof(s16);
581 	else if (strcmp(type, "u16") == 0)
582 		size = sizeof(u16);
583 	else if (strcmp(type, "s8") == 0)
584 		size = sizeof(s8);
585 	else if (strcmp(type, "u8") == 0)
586 		size = sizeof(u8);
587 	else if (strcmp(type, "char") == 0)
588 		size = sizeof(char);
589 	else if (strcmp(type, "unsigned char") == 0)
590 		size = sizeof(unsigned char);
591 	else if (strcmp(type, "int") == 0)
592 		size = sizeof(int);
593 	else if (strcmp(type, "unsigned int") == 0)
594 		size = sizeof(unsigned int);
595 	else if (strcmp(type, "long") == 0)
596 		size = sizeof(long);
597 	else if (strcmp(type, "unsigned long") == 0)
598 		size = sizeof(unsigned long);
599 	else if (strcmp(type, "pid_t") == 0)
600 		size = sizeof(pid_t);
601 	else if (synth_field_is_string(type))
602 		size = synth_field_string_size(type);
603 
604 	return size;
605 }
606 
607 static const char *synth_field_fmt(char *type)
608 {
609 	const char *fmt = "%llu";
610 
611 	if (strcmp(type, "s64") == 0)
612 		fmt = "%lld";
613 	else if (strcmp(type, "u64") == 0)
614 		fmt = "%llu";
615 	else if (strcmp(type, "s32") == 0)
616 		fmt = "%d";
617 	else if (strcmp(type, "u32") == 0)
618 		fmt = "%u";
619 	else if (strcmp(type, "s16") == 0)
620 		fmt = "%d";
621 	else if (strcmp(type, "u16") == 0)
622 		fmt = "%u";
623 	else if (strcmp(type, "s8") == 0)
624 		fmt = "%d";
625 	else if (strcmp(type, "u8") == 0)
626 		fmt = "%u";
627 	else if (strcmp(type, "char") == 0)
628 		fmt = "%d";
629 	else if (strcmp(type, "unsigned char") == 0)
630 		fmt = "%u";
631 	else if (strcmp(type, "int") == 0)
632 		fmt = "%d";
633 	else if (strcmp(type, "unsigned int") == 0)
634 		fmt = "%u";
635 	else if (strcmp(type, "long") == 0)
636 		fmt = "%ld";
637 	else if (strcmp(type, "unsigned long") == 0)
638 		fmt = "%lu";
639 	else if (strcmp(type, "pid_t") == 0)
640 		fmt = "%d";
641 	else if (synth_field_is_string(type))
642 		fmt = "%s";
643 
644 	return fmt;
645 }
646 
647 static enum print_line_t print_synth_event(struct trace_iterator *iter,
648 					   int flags,
649 					   struct trace_event *event)
650 {
651 	struct trace_array *tr = iter->tr;
652 	struct trace_seq *s = &iter->seq;
653 	struct synth_trace_event *entry;
654 	struct synth_event *se;
655 	unsigned int i, n_u64;
656 	char print_fmt[32];
657 	const char *fmt;
658 
659 	entry = (struct synth_trace_event *)iter->ent;
660 	se = container_of(event, struct synth_event, call.event);
661 
662 	trace_seq_printf(s, "%s: ", se->name);
663 
664 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
665 		if (trace_seq_has_overflowed(s))
666 			goto end;
667 
668 		fmt = synth_field_fmt(se->fields[i]->type);
669 
670 		/* parameter types */
671 		if (tr->trace_flags & TRACE_ITER_VERBOSE)
672 			trace_seq_printf(s, "%s ", fmt);
673 
674 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
675 
676 		/* parameter values */
677 		if (se->fields[i]->is_string) {
678 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
679 					 (char *)&entry->fields[n_u64],
680 					 i == se->n_fields - 1 ? "" : " ");
681 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
682 		} else {
683 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
684 					 entry->fields[n_u64],
685 					 i == se->n_fields - 1 ? "" : " ");
686 			n_u64++;
687 		}
688 	}
689 end:
690 	trace_seq_putc(s, '\n');
691 
692 	return trace_handle_return(s);
693 }
694 
695 static struct trace_event_functions synth_event_funcs = {
696 	.trace		= print_synth_event
697 };
698 
699 static notrace void trace_event_raw_event_synth(void *__data,
700 						u64 *var_ref_vals,
701 						unsigned int var_ref_idx)
702 {
703 	struct trace_event_file *trace_file = __data;
704 	struct synth_trace_event *entry;
705 	struct trace_event_buffer fbuffer;
706 	struct ring_buffer *buffer;
707 	struct synth_event *event;
708 	unsigned int i, n_u64;
709 	int fields_size = 0;
710 
711 	event = trace_file->event_call->data;
712 
713 	if (trace_trigger_soft_disabled(trace_file))
714 		return;
715 
716 	fields_size = event->n_u64 * sizeof(u64);
717 
718 	/*
719 	 * Avoid ring buffer recursion detection, as this event
720 	 * is being performed within another event.
721 	 */
722 	buffer = trace_file->tr->trace_buffer.buffer;
723 	ring_buffer_nest_start(buffer);
724 
725 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
726 					   sizeof(*entry) + fields_size);
727 	if (!entry)
728 		goto out;
729 
730 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
731 		if (event->fields[i]->is_string) {
732 			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
733 			char *str_field = (char *)&entry->fields[n_u64];
734 
735 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
736 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
737 		} else {
738 			entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
739 			n_u64++;
740 		}
741 	}
742 
743 	trace_event_buffer_commit(&fbuffer);
744 out:
745 	ring_buffer_nest_end(buffer);
746 }
747 
748 static void free_synth_event_print_fmt(struct trace_event_call *call)
749 {
750 	if (call) {
751 		kfree(call->print_fmt);
752 		call->print_fmt = NULL;
753 	}
754 }
755 
756 static int __set_synth_event_print_fmt(struct synth_event *event,
757 				       char *buf, int len)
758 {
759 	const char *fmt;
760 	int pos = 0;
761 	int i;
762 
763 	/* When len=0, we just calculate the needed length */
764 #define LEN_OR_ZERO (len ? len - pos : 0)
765 
766 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
767 	for (i = 0; i < event->n_fields; i++) {
768 		fmt = synth_field_fmt(event->fields[i]->type);
769 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
770 				event->fields[i]->name, fmt,
771 				i == event->n_fields - 1 ? "" : ", ");
772 	}
773 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
774 
775 	for (i = 0; i < event->n_fields; i++) {
776 		pos += snprintf(buf + pos, LEN_OR_ZERO,
777 				", REC->%s", event->fields[i]->name);
778 	}
779 
780 #undef LEN_OR_ZERO
781 
782 	/* return the length of print_fmt */
783 	return pos;
784 }
785 
786 static int set_synth_event_print_fmt(struct trace_event_call *call)
787 {
788 	struct synth_event *event = call->data;
789 	char *print_fmt;
790 	int len;
791 
792 	/* First: called with 0 length to calculate the needed length */
793 	len = __set_synth_event_print_fmt(event, NULL, 0);
794 
795 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
796 	if (!print_fmt)
797 		return -ENOMEM;
798 
799 	/* Second: actually write the @print_fmt */
800 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
801 	call->print_fmt = print_fmt;
802 
803 	return 0;
804 }
805 
806 static void free_synth_field(struct synth_field *field)
807 {
808 	kfree(field->type);
809 	kfree(field->name);
810 	kfree(field);
811 }
812 
813 static struct synth_field *parse_synth_field(int argc, const char **argv,
814 					     int *consumed)
815 {
816 	struct synth_field *field;
817 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
818 	int len, ret = 0;
819 
820 	if (field_type[0] == ';')
821 		field_type++;
822 
823 	if (!strcmp(field_type, "unsigned")) {
824 		if (argc < 3)
825 			return ERR_PTR(-EINVAL);
826 		prefix = "unsigned ";
827 		field_type = argv[1];
828 		field_name = argv[2];
829 		*consumed = 3;
830 	} else {
831 		field_name = argv[1];
832 		*consumed = 2;
833 	}
834 
835 	field = kzalloc(sizeof(*field), GFP_KERNEL);
836 	if (!field)
837 		return ERR_PTR(-ENOMEM);
838 
839 	len = strlen(field_name);
840 	array = strchr(field_name, '[');
841 	if (array)
842 		len -= strlen(array);
843 	else if (field_name[len - 1] == ';')
844 		len--;
845 
846 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
847 	if (!field->name) {
848 		ret = -ENOMEM;
849 		goto free;
850 	}
851 
852 	if (field_type[0] == ';')
853 		field_type++;
854 	len = strlen(field_type) + 1;
855 	if (array)
856 		len += strlen(array);
857 	if (prefix)
858 		len += strlen(prefix);
859 
860 	field->type = kzalloc(len, GFP_KERNEL);
861 	if (!field->type) {
862 		ret = -ENOMEM;
863 		goto free;
864 	}
865 	if (prefix)
866 		strcat(field->type, prefix);
867 	strcat(field->type, field_type);
868 	if (array) {
869 		strcat(field->type, array);
870 		if (field->type[len - 1] == ';')
871 			field->type[len - 1] = '\0';
872 	}
873 
874 	field->size = synth_field_size(field->type);
875 	if (!field->size) {
876 		ret = -EINVAL;
877 		goto free;
878 	}
879 
880 	if (synth_field_is_string(field->type))
881 		field->is_string = true;
882 
883 	field->is_signed = synth_field_signed(field->type);
884 
885  out:
886 	return field;
887  free:
888 	free_synth_field(field);
889 	field = ERR_PTR(ret);
890 	goto out;
891 }
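
/*
 * Example of the parsing above (illustrative): the tokens
 * "unsigned int len;" consume three argv entries and yield name "len"
 * with type "unsigned int", while "char comm[16];" consumes two and
 * yields name "comm" with type "char[16]" (the array suffix moves
 * from the name to the type), is_string set and size 16.  Trailing
 * semicolons are stripped from both name and type.
 */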
892 
893 static void free_synth_tracepoint(struct tracepoint *tp)
894 {
895 	if (!tp)
896 		return;
897 
898 	kfree(tp->name);
899 	kfree(tp);
900 }
901 
902 static struct tracepoint *alloc_synth_tracepoint(char *name)
903 {
904 	struct tracepoint *tp;
905 
906 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
907 	if (!tp)
908 		return ERR_PTR(-ENOMEM);
909 
910 	tp->name = kstrdup(name, GFP_KERNEL);
911 	if (!tp->name) {
912 		kfree(tp);
913 		return ERR_PTR(-ENOMEM);
914 	}
915 
916 	return tp;
917 }
918 
919 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
920 				    unsigned int var_ref_idx);
921 
922 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
923 			       unsigned int var_ref_idx)
924 {
925 	struct tracepoint *tp = event->tp;
926 
927 	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
928 		struct tracepoint_func *probe_func_ptr;
929 		synth_probe_func_t probe_func;
930 		void *__data;
931 
932 		if (!(cpu_online(raw_smp_processor_id())))
933 			return;
934 
935 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
936 		if (probe_func_ptr) {
937 			do {
938 				probe_func = probe_func_ptr->func;
939 				__data = probe_func_ptr->data;
940 				probe_func(__data, var_ref_vals, var_ref_idx);
941 			} while ((++probe_func_ptr)->func);
942 		}
943 	}
944 }
945 
946 static struct synth_event *find_synth_event(const char *name)
947 {
948 	struct dyn_event *pos;
949 	struct synth_event *event;
950 
951 	for_each_dyn_event(pos) {
952 		if (!is_synth_event(pos))
953 			continue;
954 		event = to_synth_event(pos);
955 		if (strcmp(event->name, name) == 0)
956 			return event;
957 	}
958 
959 	return NULL;
960 }
961 
962 static int register_synth_event(struct synth_event *event)
963 {
964 	struct trace_event_call *call = &event->call;
965 	int ret = 0;
966 
967 	event->call.class = &event->class;
968 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
969 	if (!event->class.system) {
970 		ret = -ENOMEM;
971 		goto out;
972 	}
973 
974 	event->tp = alloc_synth_tracepoint(event->name);
975 	if (IS_ERR(event->tp)) {
976 		ret = PTR_ERR(event->tp);
977 		event->tp = NULL;
978 		goto out;
979 	}
980 
981 	INIT_LIST_HEAD(&call->class->fields);
982 	call->event.funcs = &synth_event_funcs;
983 	call->class->define_fields = synth_event_define_fields;
984 
985 	ret = register_trace_event(&call->event);
986 	if (!ret) {
987 		ret = -ENODEV;
988 		goto out;
989 	}
990 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
991 	call->class->reg = trace_event_reg;
992 	call->class->probe = trace_event_raw_event_synth;
993 	call->data = event;
994 	call->tp = event->tp;
995 
996 	ret = trace_add_event_call(call);
997 	if (ret) {
998 		pr_warn("Failed to register synthetic event: %s\n",
999 			trace_event_name(call));
1000 		goto err;
1001 	}
1002 
1003 	ret = set_synth_event_print_fmt(call);
1004 	if (ret < 0) {
1005 		trace_remove_event_call(call);
1006 		goto err;
1007 	}
1008  out:
1009 	return ret;
1010  err:
1011 	unregister_trace_event(&call->event);
1012 	goto out;
1013 }
1014 
1015 static int unregister_synth_event(struct synth_event *event)
1016 {
1017 	struct trace_event_call *call = &event->call;
1018 	int ret;
1019 
1020 	ret = trace_remove_event_call(call);
1021 
1022 	return ret;
1023 }
1024 
1025 static void free_synth_event(struct synth_event *event)
1026 {
1027 	unsigned int i;
1028 
1029 	if (!event)
1030 		return;
1031 
1032 	for (i = 0; i < event->n_fields; i++)
1033 		free_synth_field(event->fields[i]);
1034 
1035 	kfree(event->fields);
1036 	kfree(event->name);
1037 	kfree(event->class.system);
1038 	free_synth_tracepoint(event->tp);
1039 	free_synth_event_print_fmt(&event->call);
1040 	kfree(event);
1041 }
1042 
1043 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1044 					     struct synth_field **fields)
1045 {
1046 	struct synth_event *event;
1047 	unsigned int i;
1048 
1049 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1050 	if (!event) {
1051 		event = ERR_PTR(-ENOMEM);
1052 		goto out;
1053 	}
1054 
1055 	event->name = kstrdup(name, GFP_KERNEL);
1056 	if (!event->name) {
1057 		kfree(event);
1058 		event = ERR_PTR(-ENOMEM);
1059 		goto out;
1060 	}
1061 
1062 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1063 	if (!event->fields) {
1064 		free_synth_event(event);
1065 		event = ERR_PTR(-ENOMEM);
1066 		goto out;
1067 	}
1068 
1069 	dyn_event_init(&event->devent, &synth_event_ops);
1070 
1071 	for (i = 0; i < n_fields; i++)
1072 		event->fields[i] = fields[i];
1073 
1074 	event->n_fields = n_fields;
1075  out:
1076 	return event;
1077 }
1078 
1079 static void action_trace(struct hist_trigger_data *hist_data,
1080 			 struct tracing_map_elt *elt, void *rec,
1081 			 struct ring_buffer_event *rbe,
1082 			 struct action_data *data, u64 *var_ref_vals)
1083 {
1084 	struct synth_event *event = data->onmatch.synth_event;
1085 
1086 	trace_synth(event, var_ref_vals, data->onmatch.var_ref_idx);
1087 }
1088 
1089 struct hist_var_data {
1090 	struct list_head list;
1091 	struct hist_trigger_data *hist_data;
1092 };
1093 
1094 static int __create_synth_event(int argc, const char *name, const char **argv)
1095 {
1096 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1097 	struct synth_event *event = NULL;
1098 	int i, consumed = 0, n_fields = 0, ret = 0;
1099 
1100 	/*
1101 	 * Argument syntax:
1102 	 *  - Add synthetic event: <event_name> field[;field] ...
1103 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1104 	 *      where 'field' = type field_name
1105 	 */
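
	/*
	 * For example (illustrative), writing
	 *
	 *   # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
	 *       synthetic_events
	 *
	 * reaches this function with name "wakeup_latency" and the
	 * field tokens as argv, producing a three-field event.
	 */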
1106 
1107 	if (name[0] == '\0' || argc < 1)
1108 		return -EINVAL;
1109 
1110 	mutex_lock(&event_mutex);
1111 
1112 	event = find_synth_event(name);
1113 	if (event) {
1114 		ret = -EEXIST;
1115 		goto out;
1116 	}
1117 
1118 	for (i = 0; i < argc - 1; i++) {
1119 		if (strcmp(argv[i], ";") == 0)
1120 			continue;
1121 		if (n_fields == SYNTH_FIELDS_MAX) {
1122 			ret = -EINVAL;
1123 			goto err;
1124 		}
1125 
1126 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1127 		if (IS_ERR(field)) {
1128 			ret = PTR_ERR(field);
1129 			goto err;
1130 		}
1131 		fields[n_fields++] = field;
1132 		i += consumed - 1;
1133 	}
1134 
1135 	if (i < argc && strcmp(argv[i], ";") != 0) {
1136 		ret = -EINVAL;
1137 		goto err;
1138 	}
1139 
1140 	event = alloc_synth_event(name, n_fields, fields);
1141 	if (IS_ERR(event)) {
1142 		ret = PTR_ERR(event);
1143 		event = NULL;
1144 		goto err;
1145 	}
1146 	ret = register_synth_event(event);
1147 	if (!ret)
1148 		dyn_event_add(&event->devent);
1149 	else
1150 		free_synth_event(event);
1151  out:
1152 	mutex_unlock(&event_mutex);
1153 
1154 	return ret;
1155  err:
1156 	for (i = 0; i < n_fields; i++)
1157 		free_synth_field(fields[i]);
1158 
1159 	goto out;
1160 }
1161 
1162 static int create_or_delete_synth_event(int argc, char **argv)
1163 {
1164 	const char *name = argv[0];
1165 	struct synth_event *event = NULL;
1166 	int ret;
1167 
1168 	/* trace_run_command() ensures argc != 0 */
1169 	if (name[0] == '!') {
1170 		mutex_lock(&event_mutex);
1171 		event = find_synth_event(name + 1);
1172 		if (event) {
1173 			if (event->ref)
1174 				ret = -EBUSY;
1175 			else {
1176 				ret = unregister_synth_event(event);
1177 				if (!ret) {
1178 					dyn_event_remove(&event->devent);
1179 					free_synth_event(event);
1180 				}
1181 			}
1182 		} else
1183 			ret = -ENOENT;
1184 		mutex_unlock(&event_mutex);
1185 		return ret;
1186 	}
1187 
1188 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1189 	return ret == -ECANCELED ? -EINVAL : ret;
1190 }
1191 
1192 static int synth_event_create(int argc, const char **argv)
1193 {
1194 	const char *name = argv[0];
1195 	int len;
1196 
1197 	if (name[0] != 's' || name[1] != ':')
1198 		return -ECANCELED;
1199 	name += 2;
1200 
1201 	/* This interface accepts a group name prefix */
1202 	if (strchr(name, '/')) {
1203 		len = sizeof(SYNTH_SYSTEM "/") - 1;
1204 		if (strncmp(name, SYNTH_SYSTEM "/", len))
1205 			return -EINVAL;
1206 		name += len;
1207 	}
1208 	return __create_synth_event(argc - 1, name, argv + 1);
1209 }
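
/*
 * Illustrative sketch of the dyn_event interface handled above: the
 * same synthetic event can also be created through the common
 * dynamic_events file using an "s:" prefix and an optional
 * "synthetic/" group name, e.g.:
 *
 *   # echo 's:wakeup_latency u64 lat; pid_t pid' >> dynamic_events
 *   # echo 's:synthetic/wakeup_latency u64 lat; pid_t pid' >> dynamic_events
 */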
1210 
1211 static int synth_event_release(struct dyn_event *ev)
1212 {
1213 	struct synth_event *event = to_synth_event(ev);
1214 	int ret;
1215 
1216 	if (event->ref)
1217 		return -EBUSY;
1218 
1219 	ret = unregister_synth_event(event);
1220 	if (ret)
1221 		return ret;
1222 
1223 	dyn_event_remove(ev);
1224 	free_synth_event(event);
1225 	return 0;
1226 }
1227 
1228 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1229 {
1230 	struct synth_field *field;
1231 	unsigned int i;
1232 
1233 	seq_printf(m, "%s\t", event->name);
1234 
1235 	for (i = 0; i < event->n_fields; i++) {
1236 		field = event->fields[i];
1237 
1238 		/* parameter type and name of each field */
1239 		seq_printf(m, "%s %s%s", field->type, field->name,
1240 			   i == event->n_fields - 1 ? "" : "; ");
1241 	}
1242 
1243 	seq_putc(m, '\n');
1244 
1245 	return 0;
1246 }
1247 
1248 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1249 {
1250 	struct synth_event *event = to_synth_event(ev);
1251 
1252 	seq_printf(m, "s:%s/", event->class.system);
1253 
1254 	return __synth_event_show(m, event);
1255 }
1256 
1257 static int synth_events_seq_show(struct seq_file *m, void *v)
1258 {
1259 	struct dyn_event *ev = v;
1260 
1261 	if (!is_synth_event(ev))
1262 		return 0;
1263 
1264 	return __synth_event_show(m, to_synth_event(ev));
1265 }
1266 
1267 static const struct seq_operations synth_events_seq_op = {
1268 	.start	= dyn_event_seq_start,
1269 	.next	= dyn_event_seq_next,
1270 	.stop	= dyn_event_seq_stop,
1271 	.show	= synth_events_seq_show,
1272 };
1273 
1274 static int synth_events_open(struct inode *inode, struct file *file)
1275 {
1276 	int ret;
1277 
1278 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1279 		ret = dyn_events_release_all(&synth_event_ops);
1280 		if (ret < 0)
1281 			return ret;
1282 	}
1283 
1284 	return seq_open(file, &synth_events_seq_op);
1285 }
1286 
1287 static ssize_t synth_events_write(struct file *file,
1288 				  const char __user *buffer,
1289 				  size_t count, loff_t *ppos)
1290 {
1291 	return trace_parse_run_command(file, buffer, count, ppos,
1292 				       create_or_delete_synth_event);
1293 }
1294 
1295 static const struct file_operations synth_events_fops = {
1296 	.open           = synth_events_open,
1297 	.write		= synth_events_write,
1298 	.read           = seq_read,
1299 	.llseek         = seq_lseek,
1300 	.release        = seq_release,
1301 };
1302 
1303 static u64 hist_field_timestamp(struct hist_field *hist_field,
1304 				struct tracing_map_elt *elt,
1305 				struct ring_buffer_event *rbe,
1306 				void *event)
1307 {
1308 	struct hist_trigger_data *hist_data = hist_field->hist_data;
1309 	struct trace_array *tr = hist_data->event_file->tr;
1310 
1311 	u64 ts = ring_buffer_event_time_stamp(rbe);
1312 
1313 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1314 		ts = ns2usecs(ts);
1315 
1316 	return ts;
1317 }
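
/*
 * Example for hist_field_timestamp() above (illustrative): with
 * "ts0=common_timestamp.usecs" in a trigger and a nanosecond trace
 * clock, a ring-buffer timestamp of 1000500 ns is stored in ts0 as
 * 1000 us; without the ".usecs" modifier the raw nanosecond value is
 * used.
 */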
1318 
1319 static u64 hist_field_cpu(struct hist_field *hist_field,
1320 			  struct tracing_map_elt *elt,
1321 			  struct ring_buffer_event *rbe,
1322 			  void *event)
1323 {
1324 	int cpu = smp_processor_id();
1325 
1326 	return cpu;
1327 }
1328 
1329 /**
1330  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1331  * @hist_field: The VAR_REF field to check
1332  * @var_data: The hist trigger that owns the variable
1333  * @var_idx: The trigger variable identifier
1334  *
1335  * Check the given VAR_REF field to see whether or not it references
1336  * the given variable associated with the given trigger.
1337  *
1338  * Return: The VAR_REF field if it does reference the variable, NULL if not
1339  */
1340 static struct hist_field *
1341 check_field_for_var_ref(struct hist_field *hist_field,
1342 			struct hist_trigger_data *var_data,
1343 			unsigned int var_idx)
1344 {
1345 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1346 
1347 	if (hist_field && hist_field->var.idx == var_idx &&
1348 	    hist_field->var.hist_data == var_data)
1349 		return hist_field;
1350 
1351 	return NULL;
1352 }
1353 
1354 /**
1355  * find_var_ref - Check if a trigger has a reference to a trigger variable
1356  * @hist_data: The hist trigger that might have a reference to the variable
1357  * @var_data: The hist trigger that owns the variable
1358  * @var_idx: The trigger variable identifier
1359  *
1360  * Check the list of var_refs[] on the first hist trigger to see
1361  * whether any of them are references to the variable on the second
1362  * trigger.
1363  *
1364  * Return: The VAR_REF field referencing the variable if so, NULL if not
1365  */
1366 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1367 				       struct hist_trigger_data *var_data,
1368 				       unsigned int var_idx)
1369 {
1370 	struct hist_field *hist_field;
1371 	unsigned int i;
1372 
1373 	for (i = 0; i < hist_data->n_var_refs; i++) {
1374 		hist_field = hist_data->var_refs[i];
1375 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
1376 			return hist_field;
1377 	}
1378 
1379 	return NULL;
1380 }
1381 
1382 /**
1383  * find_any_var_ref - Check if there is a reference to a given trigger variable
1384  * @hist_data: The hist trigger
1385  * @var_idx: The trigger variable identifier
1386  *
1387  * Check to see whether the given variable is currently referenced by
1388  * any other trigger.
1389  *
1390  * The trigger the variable is defined on is explicitly excluded - the
1391  * assumption being that a self-reference doesn't prevent a trigger
1392  * from being removed.
1393  *
1394  * Return: The VAR_REF field referencing the variable if so, NULL if not
1395  */
1396 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1397 					   unsigned int var_idx)
1398 {
1399 	struct trace_array *tr = hist_data->event_file->tr;
1400 	struct hist_field *found = NULL;
1401 	struct hist_var_data *var_data;
1402 
1403 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1404 		if (var_data->hist_data == hist_data)
1405 			continue;
1406 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1407 		if (found)
1408 			break;
1409 	}
1410 
1411 	return found;
1412 }
1413 
1414 /**
1415  * check_var_refs - Check if there is a reference to any of trigger's variables
1416  * @hist_data: The hist trigger
1417  *
1418  * A trigger can define one or more variables.  If any one of them is
1419  * currently referenced by any other trigger, this function will
1420  * determine that.
1421  *
1422  * Typically used to determine whether or not a trigger can be removed
1423  * - if there are any references to a trigger's variables, it cannot.
1424  *
1425  * Return: True if there is a reference to any of trigger's variables
1426  */
1427 static bool check_var_refs(struct hist_trigger_data *hist_data)
1428 {
1429 	struct hist_field *field;
1430 	bool found = false;
1431 	int i;
1432 
1433 	for_each_hist_field(i, hist_data) {
1434 		field = hist_data->fields[i];
1435 		if (field && field->flags & HIST_FIELD_FL_VAR) {
1436 			if (find_any_var_ref(hist_data, field->var.idx)) {
1437 				found = true;
1438 				break;
1439 			}
1440 		}
1441 	}
1442 
1443 	return found;
1444 }
1445 
1446 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1447 {
1448 	struct trace_array *tr = hist_data->event_file->tr;
1449 	struct hist_var_data *var_data, *found = NULL;
1450 
1451 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1452 		if (var_data->hist_data == hist_data) {
1453 			found = var_data;
1454 			break;
1455 		}
1456 	}
1457 
1458 	return found;
1459 }
1460 
1461 static bool field_has_hist_vars(struct hist_field *hist_field,
1462 				unsigned int level)
1463 {
1464 	int i;
1465 
1466 	if (level > 3)
1467 		return false;
1468 
1469 	if (!hist_field)
1470 		return false;
1471 
1472 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1473 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1474 		return true;
1475 
1476 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1477 		struct hist_field *operand;
1478 
1479 		operand = hist_field->operands[i];
1480 		if (field_has_hist_vars(operand, level + 1))
1481 			return true;
1482 	}
1483 
1484 	return false;
1485 }
1486 
1487 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1488 {
1489 	struct hist_field *hist_field;
1490 	int i;
1491 
1492 	for_each_hist_field(i, hist_data) {
1493 		hist_field = hist_data->fields[i];
1494 		if (field_has_hist_vars(hist_field, 0))
1495 			return true;
1496 	}
1497 
1498 	return false;
1499 }
1500 
1501 static int save_hist_vars(struct hist_trigger_data *hist_data)
1502 {
1503 	struct trace_array *tr = hist_data->event_file->tr;
1504 	struct hist_var_data *var_data;
1505 
1506 	var_data = find_hist_vars(hist_data);
1507 	if (var_data)
1508 		return 0;
1509 
1510 	if (trace_array_get(tr) < 0)
1511 		return -ENODEV;
1512 
1513 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1514 	if (!var_data) {
1515 		trace_array_put(tr);
1516 		return -ENOMEM;
1517 	}
1518 
1519 	var_data->hist_data = hist_data;
1520 	list_add(&var_data->list, &tr->hist_vars);
1521 
1522 	return 0;
1523 }
1524 
1525 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1526 {
1527 	struct trace_array *tr = hist_data->event_file->tr;
1528 	struct hist_var_data *var_data;
1529 
1530 	var_data = find_hist_vars(hist_data);
1531 	if (!var_data)
1532 		return;
1533 
1534 	if (WARN_ON(check_var_refs(hist_data)))
1535 		return;
1536 
1537 	list_del(&var_data->list);
1538 
1539 	kfree(var_data);
1540 
1541 	trace_array_put(tr);
1542 }
1543 
1544 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1545 					 const char *var_name)
1546 {
1547 	struct hist_field *hist_field, *found = NULL;
1548 	int i;
1549 
1550 	for_each_hist_field(i, hist_data) {
1551 		hist_field = hist_data->fields[i];
1552 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1553 		    strcmp(hist_field->var.name, var_name) == 0) {
1554 			found = hist_field;
1555 			break;
1556 		}
1557 	}
1558 
1559 	return found;
1560 }
1561 
1562 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1563 				   struct trace_event_file *file,
1564 				   const char *var_name)
1565 {
1566 	struct hist_trigger_data *test_data;
1567 	struct event_trigger_data *test;
1568 	struct hist_field *hist_field;
1569 
1570 	hist_field = find_var_field(hist_data, var_name);
1571 	if (hist_field)
1572 		return hist_field;
1573 
1574 	list_for_each_entry_rcu(test, &file->triggers, list) {
1575 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1576 			test_data = test->private_data;
1577 			hist_field = find_var_field(test_data, var_name);
1578 			if (hist_field)
1579 				return hist_field;
1580 		}
1581 	}
1582 
1583 	return NULL;
1584 }
1585 
1586 static struct trace_event_file *find_var_file(struct trace_array *tr,
1587 					      char *system,
1588 					      char *event_name,
1589 					      char *var_name)
1590 {
1591 	struct hist_trigger_data *var_hist_data;
1592 	struct hist_var_data *var_data;
1593 	struct trace_event_file *file, *found = NULL;
1594 
1595 	if (system)
1596 		return find_event_file(tr, system, event_name);
1597 
1598 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1599 		var_hist_data = var_data->hist_data;
1600 		file = var_hist_data->event_file;
1601 		if (file == found)
1602 			continue;
1603 
1604 		if (find_var_field(var_hist_data, var_name)) {
1605 			if (found) {
1606 				hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name);
1607 				return NULL;
1608 			}
1609 
1610 			found = file;
1611 		}
1612 	}
1613 
1614 	return found;
1615 }
1616 
1617 static struct hist_field *find_file_var(struct trace_event_file *file,
1618 					const char *var_name)
1619 {
1620 	struct hist_trigger_data *test_data;
1621 	struct event_trigger_data *test;
1622 	struct hist_field *hist_field;
1623 
1624 	list_for_each_entry_rcu(test, &file->triggers, list) {
1625 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1626 			test_data = test->private_data;
1627 			hist_field = find_var_field(test_data, var_name);
1628 			if (hist_field)
1629 				return hist_field;
1630 		}
1631 	}
1632 
1633 	return NULL;
1634 }
1635 
1636 static struct hist_field *
1637 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1638 {
1639 	struct trace_array *tr = hist_data->event_file->tr;
1640 	struct hist_field *hist_field, *found = NULL;
1641 	struct trace_event_file *file;
1642 	unsigned int i;
1643 
1644 	for (i = 0; i < hist_data->n_actions; i++) {
1645 		struct action_data *data = hist_data->actions[i];
1646 
1647 		if (data->fn == action_trace) {
1648 			char *system = data->onmatch.match_event_system;
1649 			char *event_name = data->onmatch.match_event;
1650 
1651 			file = find_var_file(tr, system, event_name, var_name);
1652 			if (!file)
1653 				continue;
1654 			hist_field = find_file_var(file, var_name);
1655 			if (hist_field) {
1656 				if (found) {
1657 					hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name);
1658 					return ERR_PTR(-EINVAL);
1659 				}
1660 
1661 				found = hist_field;
1662 			}
1663 		}
1664 	}
1665 	return found;
1666 }
1667 
1668 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1669 					 char *system,
1670 					 char *event_name,
1671 					 char *var_name)
1672 {
1673 	struct trace_array *tr = hist_data->event_file->tr;
1674 	struct hist_field *hist_field = NULL;
1675 	struct trace_event_file *file;
1676 
1677 	if (!system || !event_name) {
1678 		hist_field = find_match_var(hist_data, var_name);
1679 		if (IS_ERR(hist_field))
1680 			return NULL;
1681 		if (hist_field)
1682 			return hist_field;
1683 	}
1684 
1685 	file = find_var_file(tr, system, event_name, var_name);
1686 	if (!file)
1687 		return NULL;
1688 
1689 	hist_field = find_file_var(file, var_name);
1690 
1691 	return hist_field;
1692 }
1693 
1694 struct hist_elt_data {
1695 	char *comm;
1696 	u64 *var_ref_vals;
1697 	char *field_var_str[SYNTH_FIELDS_MAX];
1698 };
1699 
1700 static u64 hist_field_var_ref(struct hist_field *hist_field,
1701 			      struct tracing_map_elt *elt,
1702 			      struct ring_buffer_event *rbe,
1703 			      void *event)
1704 {
1705 	struct hist_elt_data *elt_data;
1706 	u64 var_val = 0;
1707 
1708 	elt_data = elt->private_data;
1709 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1710 
1711 	return var_val;
1712 }
1713 
1714 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1715 			     u64 *var_ref_vals, bool self)
1716 {
1717 	struct hist_trigger_data *var_data;
1718 	struct tracing_map_elt *var_elt;
1719 	struct hist_field *hist_field;
1720 	unsigned int i, var_idx;
1721 	bool resolved = true;
1722 	u64 var_val = 0;
1723 
1724 	for (i = 0; i < hist_data->n_var_refs; i++) {
1725 		hist_field = hist_data->var_refs[i];
1726 		var_idx = hist_field->var.idx;
1727 		var_data = hist_field->var.hist_data;
1728 
1729 		if (var_data == NULL) {
1730 			resolved = false;
1731 			break;
1732 		}
1733 
1734 		if ((self && var_data != hist_data) ||
1735 		    (!self && var_data == hist_data))
1736 			continue;
1737 
1738 		var_elt = tracing_map_lookup(var_data->map, key);
1739 		if (!var_elt) {
1740 			resolved = false;
1741 			break;
1742 		}
1743 
1744 		if (!tracing_map_var_set(var_elt, var_idx)) {
1745 			resolved = false;
1746 			break;
1747 		}
1748 
1749 		if (self || !hist_field->read_once)
1750 			var_val = tracing_map_read_var(var_elt, var_idx);
1751 		else
1752 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1753 
1754 		var_ref_vals[i] = var_val;
1755 	}
1756 
1757 	return resolved;
1758 }
1759 
1760 static const char *hist_field_name(struct hist_field *field,
1761 				   unsigned int level)
1762 {
1763 	const char *field_name = "";
1764 
1765 	if (level > 1)
1766 		return field_name;
1767 
1768 	if (field->field)
1769 		field_name = field->field->name;
1770 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1771 		 field->flags & HIST_FIELD_FL_ALIAS)
1772 		field_name = hist_field_name(field->operands[0], ++level);
1773 	else if (field->flags & HIST_FIELD_FL_CPU)
1774 		field_name = "cpu";
1775 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1776 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1777 		if (field->system) {
1778 			static char full_name[MAX_FILTER_STR_VAL];
1779 
1780 			strcat(full_name, field->system);
1781 			strcat(full_name, ".");
1782 			strcat(full_name, field->event_name);
1783 			strcat(full_name, ".");
1784 			strcat(full_name, field->name);
1785 			field_name = full_name;
1786 		} else
1787 			field_name = field->name;
1788 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1789 		field_name = "common_timestamp";
1790 
1791 	if (field_name == NULL)
1792 		field_name = "";
1793 
1794 	return field_name;
1795 }
1796 
1797 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1798 {
1799 	hist_field_fn_t fn = NULL;
1800 
1801 	switch (field_size) {
1802 	case 8:
1803 		if (field_is_signed)
1804 			fn = hist_field_s64;
1805 		else
1806 			fn = hist_field_u64;
1807 		break;
1808 	case 4:
1809 		if (field_is_signed)
1810 			fn = hist_field_s32;
1811 		else
1812 			fn = hist_field_u32;
1813 		break;
1814 	case 2:
1815 		if (field_is_signed)
1816 			fn = hist_field_s16;
1817 		else
1818 			fn = hist_field_u16;
1819 		break;
1820 	case 1:
1821 		if (field_is_signed)
1822 			fn = hist_field_s8;
1823 		else
1824 			fn = hist_field_u8;
1825 		break;
1826 	}
1827 
1828 	return fn;
1829 }
1830 
1831 static int parse_map_size(char *str)
1832 {
1833 	unsigned long size, map_bits;
1834 	int ret;
1835 
1836 	strsep(&str, "=");
1837 	if (!str) {
1838 		ret = -EINVAL;
1839 		goto out;
1840 	}
1841 
1842 	ret = kstrtoul(str, 0, &size);
1843 	if (ret)
1844 		goto out;
1845 
1846 	map_bits = ilog2(roundup_pow_of_two(size));
1847 	if (map_bits < TRACING_MAP_BITS_MIN ||
1848 	    map_bits > TRACING_MAP_BITS_MAX)
1849 		ret = -EINVAL;
1850 	else
1851 		ret = map_bits;
1852  out:
1853 	return ret;
1854 }
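
/*
 * Example for parse_map_size() above (illustrative): "size=2048"
 * yields map_bits = ilog2(roundup_pow_of_two(2048)) = 11, i.e. a
 * tracing_map with 2^11 entries; results outside the
 * TRACING_MAP_BITS_MIN..TRACING_MAP_BITS_MAX range are rejected with
 * -EINVAL.
 */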
1855 
1856 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
1857 {
1858 	unsigned int i;
1859 
1860 	if (!attrs)
1861 		return;
1862 
1863 	for (i = 0; i < attrs->n_assignments; i++)
1864 		kfree(attrs->assignment_str[i]);
1865 
1866 	for (i = 0; i < attrs->n_actions; i++)
1867 		kfree(attrs->action_str[i]);
1868 
1869 	kfree(attrs->name);
1870 	kfree(attrs->sort_key_str);
1871 	kfree(attrs->keys_str);
1872 	kfree(attrs->vals_str);
1873 	kfree(attrs->clock);
1874 	kfree(attrs);
1875 }
1876 
1877 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
1878 {
1879 	int ret = -EINVAL;
1880 
1881 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
1882 		return ret;
1883 
1884 	if ((str_has_prefix(str, "onmatch(")) ||
1885 	    (str_has_prefix(str, "onmax("))) {
1886 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
1887 		if (!attrs->action_str[attrs->n_actions]) {
1888 			ret = -ENOMEM;
1889 			return ret;
1890 		}
1891 		attrs->n_actions++;
1892 		ret = 0;
1893 	}
1894 
1895 	return ret;
1896 }
1897 
1898 static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
1899 {
1900 	int ret = 0;
1901 
1902 	if ((str_has_prefix(str, "key=")) ||
1903 	    (str_has_prefix(str, "keys="))) {
1904 		attrs->keys_str = kstrdup(str, GFP_KERNEL);
1905 		if (!attrs->keys_str) {
1906 			ret = -ENOMEM;
1907 			goto out;
1908 		}
1909 	} else if ((str_has_prefix(str, "val=")) ||
1910 		   (str_has_prefix(str, "vals=")) ||
1911 		   (str_has_prefix(str, "values="))) {
1912 		attrs->vals_str = kstrdup(str, GFP_KERNEL);
1913 		if (!attrs->vals_str) {
1914 			ret = -ENOMEM;
1915 			goto out;
1916 		}
1917 	} else if (str_has_prefix(str, "sort=")) {
1918 		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
1919 		if (!attrs->sort_key_str) {
1920 			ret = -ENOMEM;
1921 			goto out;
1922 		}
1923 	} else if (str_has_prefix(str, "name=")) {
1924 		attrs->name = kstrdup(str, GFP_KERNEL);
1925 		if (!attrs->name) {
1926 			ret = -ENOMEM;
1927 			goto out;
1928 		}
1929 	} else if (str_has_prefix(str, "clock=")) {
1930 		strsep(&str, "=");
1931 		if (!str) {
1932 			ret = -EINVAL;
1933 			goto out;
1934 		}
1935 
1936 		str = strstrip(str);
1937 		attrs->clock = kstrdup(str, GFP_KERNEL);
1938 		if (!attrs->clock) {
1939 			ret = -ENOMEM;
1940 			goto out;
1941 		}
1942 	} else if (str_has_prefix(str, "size=")) {
1943 		int map_bits = parse_map_size(str);
1944 
1945 		if (map_bits < 0) {
1946 			ret = map_bits;
1947 			goto out;
1948 		}
1949 		attrs->map_bits = map_bits;
1950 	} else {
1951 		char *assignment;
1952 
1953 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
1954 			hist_err("Too many variables defined: ", str);
1955 			ret = -EINVAL;
1956 			goto out;
1957 		}
1958 
1959 		assignment = kstrdup(str, GFP_KERNEL);
1960 		if (!assignment) {
1961 			ret = -ENOMEM;
1962 			goto out;
1963 		}
1964 
1965 		attrs->assignment_str[attrs->n_assignments++] = assignment;
1966 	}
1967  out:
1968 	return ret;
1969 }
1970 
1971 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
1972 {
1973 	struct hist_trigger_attrs *attrs;
1974 	int ret = 0;
1975 
1976 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
1977 	if (!attrs)
1978 		return ERR_PTR(-ENOMEM);
1979 
1980 	while (trigger_str) {
1981 		char *str = strsep(&trigger_str, ":");
1982 
1983 		if (strchr(str, '=')) {
1984 			ret = parse_assignment(str, attrs);
1985 			if (ret)
1986 				goto free;
1987 		} else if (strcmp(str, "pause") == 0)
1988 			attrs->pause = true;
1989 		else if ((strcmp(str, "cont") == 0) ||
1990 			 (strcmp(str, "continue") == 0))
1991 			attrs->cont = true;
1992 		else if (strcmp(str, "clear") == 0)
1993 			attrs->clear = true;
1994 		else {
1995 			ret = parse_action(str, attrs);
1996 			if (ret)
1997 				goto free;
1998 		}
1999 	}
2000 
2001 	if (!attrs->keys_str) {
2002 		ret = -EINVAL;
2003 		goto free;
2004 	}
2005 
2006 	if (!attrs->clock) {
2007 		attrs->clock = kstrdup("global", GFP_KERNEL);
2008 		if (!attrs->clock) {
2009 			ret = -ENOMEM;
2010 			goto free;
2011 		}
2012 	}
2013 
2014 	return attrs;
2015  free:
2016 	destroy_hist_trigger_attrs(attrs);
2017 
2018 	return ERR_PTR(ret);
2019 }
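
/*
 * Illustrative walk-through of the parser above: a trigger string
 * such as
 *
 *   keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:sort=wakeup_lat:size=2048:onmax($wakeup_lat).save(next_comm)
 *
 * (roughly, the command text after the leading "hist:" keyword, with
 * any trailing 'if' filter already split off by the caller) ends up
 * with keys_str "keys=next_pid", sort_key_str "sort=wakeup_lat",
 * map_bits 11, one assignment_str
 * "wakeup_lat=common_timestamp.usecs-$ts0" and one action_str
 * "onmax($wakeup_lat).save(next_comm)".
 */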
2020 
2021 static inline void save_comm(char *comm, struct task_struct *task)
2022 {
2023 	if (!task->pid) {
2024 		strcpy(comm, "<idle>");
2025 		return;
2026 	}
2027 
2028 	if (WARN_ON_ONCE(task->pid < 0)) {
2029 		strcpy(comm, "<XXX>");
2030 		return;
2031 	}
2032 
2033 	memcpy(comm, task->comm, TASK_COMM_LEN);
2034 }
2035 
2036 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2037 {
2038 	unsigned int i;
2039 
2040 	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2041 		kfree(elt_data->field_var_str[i]);
2042 
2043 	kfree(elt_data->comm);
2044 	kfree(elt_data);
2045 }
2046 
2047 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2048 {
2049 	struct hist_elt_data *elt_data = elt->private_data;
2050 
2051 	hist_elt_data_free(elt_data);
2052 }
2053 
2054 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2055 {
2056 	struct hist_trigger_data *hist_data = elt->map->private_data;
2057 	unsigned int size = TASK_COMM_LEN;
2058 	struct hist_elt_data *elt_data;
2059 	struct hist_field *key_field;
2060 	unsigned int i, n_str;
2061 
2062 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2063 	if (!elt_data)
2064 		return -ENOMEM;
2065 
2066 	for_each_hist_key_field(i, hist_data) {
2067 		key_field = hist_data->fields[i];
2068 
2069 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2070 			elt_data->comm = kzalloc(size, GFP_KERNEL);
2071 			if (!elt_data->comm) {
2072 				kfree(elt_data);
2073 				return -ENOMEM;
2074 			}
2075 			break;
2076 		}
2077 	}
2078 
2079 	n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
2080 
2081 	size = STR_VAR_LEN_MAX;
2082 
2083 	for (i = 0; i < n_str; i++) {
2084 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2085 		if (!elt_data->field_var_str[i]) {
2086 			hist_elt_data_free(elt_data);
2087 			return -ENOMEM;
2088 		}
2089 	}
2090 
2091 	elt->private_data = elt_data;
2092 
2093 	return 0;
2094 }
2095 
2096 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2097 {
2098 	struct hist_elt_data *elt_data = elt->private_data;
2099 
2100 	if (elt_data->comm)
2101 		save_comm(elt_data->comm, current);
2102 }
2103 
2104 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2105 	.elt_alloc	= hist_trigger_elt_data_alloc,
2106 	.elt_free	= hist_trigger_elt_data_free,
2107 	.elt_init	= hist_trigger_elt_data_init,
2108 };
2109 
2110 static const char *get_hist_field_flags(struct hist_field *hist_field)
2111 {
2112 	const char *flags_str = NULL;
2113 
2114 	if (hist_field->flags & HIST_FIELD_FL_HEX)
2115 		flags_str = "hex";
2116 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
2117 		flags_str = "sym";
2118 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2119 		flags_str = "sym-offset";
2120 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2121 		flags_str = "execname";
2122 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2123 		flags_str = "syscall";
2124 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2125 		flags_str = "log2";
2126 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2127 		flags_str = "usecs";
2128 
2129 	return flags_str;
2130 }
2131 
2132 static void expr_field_str(struct hist_field *field, char *expr)
2133 {
2134 	if (field->flags & HIST_FIELD_FL_VAR_REF)
2135 		strcat(expr, "$");
2136 
2137 	strcat(expr, hist_field_name(field, 0));
2138 
2139 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2140 		const char *flags_str = get_hist_field_flags(field);
2141 
2142 		if (flags_str) {
2143 			strcat(expr, ".");
2144 			strcat(expr, flags_str);
2145 		}
2146 	}
2147 }
2148 
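/*
 * Build a displayable string for an expression field, e.g.
 * "common_timestamp.usecs-$ts0", by concatenating its operands,
 * operator and any modifiers.  The result is used as the field's ->name.
 */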
2149 static char *expr_str(struct hist_field *field, unsigned int level)
2150 {
2151 	char *expr;
2152 
2153 	if (level > 1)
2154 		return NULL;
2155 
2156 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2157 	if (!expr)
2158 		return NULL;
2159 
2160 	if (!field->operands[0]) {
2161 		expr_field_str(field, expr);
2162 		return expr;
2163 	}
2164 
2165 	if (field->operator == FIELD_OP_UNARY_MINUS) {
2166 		char *subexpr;
2167 
2168 		strcat(expr, "-(");
2169 		subexpr = expr_str(field->operands[0], ++level);
2170 		if (!subexpr) {
2171 			kfree(expr);
2172 			return NULL;
2173 		}
2174 		strcat(expr, subexpr);
2175 		strcat(expr, ")");
2176 
2177 		kfree(subexpr);
2178 
2179 		return expr;
2180 	}
2181 
2182 	expr_field_str(field->operands[0], expr);
2183 
2184 	switch (field->operator) {
2185 	case FIELD_OP_MINUS:
2186 		strcat(expr, "-");
2187 		break;
2188 	case FIELD_OP_PLUS:
2189 		strcat(expr, "+");
2190 		break;
2191 	default:
2192 		kfree(expr);
2193 		return NULL;
2194 	}
2195 
2196 	expr_field_str(field->operands[1], expr);
2197 
2198 	return expr;
2199 }
2200 
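/*
 * Report which operator, if any, appears in an expression string.
 * strpbrk() finds the first '+' or '-'; a '-' at the very start of the
 * string is treated as unary minus.
 */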
2201 static int contains_operator(char *str)
2202 {
2203 	enum field_op_id field_op = FIELD_OP_NONE;
2204 	char *op;
2205 
2206 	op = strpbrk(str, "+-");
2207 	if (!op)
2208 		return FIELD_OP_NONE;
2209 
2210 	switch (*op) {
2211 	case '-':
2212 		if (*str == '-')
2213 			field_op = FIELD_OP_UNARY_MINUS;
2214 		else
2215 			field_op = FIELD_OP_MINUS;
2216 		break;
2217 	case '+':
2218 		field_op = FIELD_OP_PLUS;
2219 		break;
2220 	default:
2221 		break;
2222 	}
2223 
2224 	return field_op;
2225 }
2226 
2227 static void __destroy_hist_field(struct hist_field *hist_field)
2228 {
2229 	kfree(hist_field->var.name);
2230 	kfree(hist_field->name);
2231 	kfree(hist_field->type);
2232 
2233 	kfree(hist_field);
2234 }
2235 
2236 static void destroy_hist_field(struct hist_field *hist_field,
2237 			       unsigned int level)
2238 {
2239 	unsigned int i;
2240 
2241 	if (level > 3)
2242 		return;
2243 
2244 	if (!hist_field)
2245 		return;
2246 
2247 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2248 		return; /* var refs will be destroyed separately */
2249 
2250 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2251 		destroy_hist_field(hist_field->operands[i], level + 1);
2252 
2253 	__destroy_hist_field(hist_field);
2254 }
2255 
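/*
 * Allocate and initialize a hist_field for the given trace event field
 * and flags: pick the accessor function (fn), size and type string, and
 * optionally name it as a variable (var_name).  EXPR and ALIAS fields
 * are returned mostly uninitialized for the caller to fill in.
 * Returns NULL on failure.
 */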
2256 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2257 					    struct ftrace_event_field *field,
2258 					    unsigned long flags,
2259 					    char *var_name)
2260 {
2261 	struct hist_field *hist_field;
2262 
2263 	if (field && is_function_field(field))
2264 		return NULL;
2265 
2266 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2267 	if (!hist_field)
2268 		return NULL;
2269 
2270 	hist_field->hist_data = hist_data;
2271 
2272 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2273 		goto out; /* caller will populate */
2274 
2275 	if (flags & HIST_FIELD_FL_VAR_REF) {
2276 		hist_field->fn = hist_field_var_ref;
2277 		goto out;
2278 	}
2279 
2280 	if (flags & HIST_FIELD_FL_HITCOUNT) {
2281 		hist_field->fn = hist_field_counter;
2282 		hist_field->size = sizeof(u64);
2283 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2284 		if (!hist_field->type)
2285 			goto free;
2286 		goto out;
2287 	}
2288 
2289 	if (flags & HIST_FIELD_FL_STACKTRACE) {
2290 		hist_field->fn = hist_field_none;
2291 		goto out;
2292 	}
2293 
2294 	if (flags & HIST_FIELD_FL_LOG2) {
		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;

		hist_field->fn = hist_field_log2;
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		if (!hist_field->operands[0])
			goto free; /* don't dereference a failed operand */
		hist_field->size = hist_field->operands[0]->size;
2299 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2300 		if (!hist_field->type)
2301 			goto free;
2302 		goto out;
2303 	}
2304 
2305 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2306 		hist_field->fn = hist_field_timestamp;
2307 		hist_field->size = sizeof(u64);
2308 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2309 		if (!hist_field->type)
2310 			goto free;
2311 		goto out;
2312 	}
2313 
2314 	if (flags & HIST_FIELD_FL_CPU) {
2315 		hist_field->fn = hist_field_cpu;
2316 		hist_field->size = sizeof(int);
2317 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2318 		if (!hist_field->type)
2319 			goto free;
2320 		goto out;
2321 	}
2322 
2323 	if (WARN_ON_ONCE(!field))
2324 		goto out;
2325 
2326 	if (is_string_field(field)) {
2327 		flags |= HIST_FIELD_FL_STRING;
2328 
2329 		hist_field->size = MAX_FILTER_STR_VAL;
2330 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2331 		if (!hist_field->type)
2332 			goto free;
2333 
2334 		if (field->filter_type == FILTER_STATIC_STRING)
2335 			hist_field->fn = hist_field_string;
2336 		else if (field->filter_type == FILTER_DYN_STRING)
2337 			hist_field->fn = hist_field_dynstring;
2338 		else
2339 			hist_field->fn = hist_field_pstring;
2340 	} else {
2341 		hist_field->size = field->size;
2342 		hist_field->is_signed = field->is_signed;
2343 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2344 		if (!hist_field->type)
2345 			goto free;
2346 
2347 		hist_field->fn = select_value_fn(field->size,
2348 						 field->is_signed);
2349 		if (!hist_field->fn) {
2350 			destroy_hist_field(hist_field, 0);
2351 			return NULL;
2352 		}
2353 	}
2354  out:
2355 	hist_field->field = field;
2356 	hist_field->flags = flags;
2357 
2358 	if (var_name) {
2359 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2360 		if (!hist_field->var.name)
2361 			goto free;
2362 	}
2363 
2364 	return hist_field;
2365  free:
2366 	destroy_hist_field(hist_field, 0);
2367 	return NULL;
2368 }
2369 
2370 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2371 {
2372 	unsigned int i;
2373 
2374 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2375 		if (hist_data->fields[i]) {
2376 			destroy_hist_field(hist_data->fields[i], 0);
2377 			hist_data->fields[i] = NULL;
2378 		}
2379 	}
2380 
2381 	for (i = 0; i < hist_data->n_var_refs; i++) {
2382 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2383 		__destroy_hist_field(hist_data->var_refs[i]);
2384 		hist_data->var_refs[i] = NULL;
2385 	}
2386 }
2387 
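/*
 * Initialize a VAR_REF field from the variable it refers to: copy the
 * variable's tracing_map index, size, signedness, timestamp flags, type
 * and name, along with the optional system/event qualifiers.
 */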
2388 static int init_var_ref(struct hist_field *ref_field,
2389 			struct hist_field *var_field,
2390 			char *system, char *event_name)
2391 {
2392 	int err = 0;
2393 
2394 	ref_field->var.idx = var_field->var.idx;
2395 	ref_field->var.hist_data = var_field->hist_data;
2396 	ref_field->size = var_field->size;
2397 	ref_field->is_signed = var_field->is_signed;
2398 	ref_field->flags |= var_field->flags &
2399 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2400 
2401 	if (system) {
2402 		ref_field->system = kstrdup(system, GFP_KERNEL);
2403 		if (!ref_field->system)
2404 			return -ENOMEM;
2405 	}
2406 
2407 	if (event_name) {
2408 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2409 		if (!ref_field->event_name) {
2410 			err = -ENOMEM;
2411 			goto free;
2412 		}
2413 	}
2414 
2415 	if (var_field->var.name) {
2416 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2417 		if (!ref_field->name) {
2418 			err = -ENOMEM;
2419 			goto free;
2420 		}
2421 	} else if (var_field->name) {
2422 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2423 		if (!ref_field->name) {
2424 			err = -ENOMEM;
2425 			goto free;
2426 		}
2427 	}
2428 
2429 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2430 	if (!ref_field->type) {
2431 		err = -ENOMEM;
2432 		goto free;
2433 	}
2434  out:
2435 	return err;
2436  free:
2437 	kfree(ref_field->system);
2438 	kfree(ref_field->event_name);
2439 	kfree(ref_field->name);
2440 
2441 	goto out;
2442 }
2443 
2444 /**
2445  * create_var_ref - Create a variable reference and attach it to trigger
2446  * @hist_data: The trigger that will be referencing the variable
2447  * @var_field: The VAR field to create a reference to
2448  * @system: The optional system string
2449  * @event_name: The optional event_name string
2450  *
2451  * Given a variable hist_field, create a VAR_REF hist_field that
2452  * represents a reference to it.
2453  *
2454  * This function also adds the reference to the trigger that
2455  * now references the variable.
2456  *
2457  * Return: The VAR_REF field if successful, NULL if not
2458  */
2459 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2460 					 struct hist_field *var_field,
2461 					 char *system, char *event_name)
2462 {
2463 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2464 	struct hist_field *ref_field;
2465 
2466 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2467 	if (ref_field) {
2468 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2469 			destroy_hist_field(ref_field, 0);
2470 			return NULL;
2471 		}
2472 
2473 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2474 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2475 	}
2476 
2477 	return ref_field;
2478 }
2479 
2480 static bool is_var_ref(char *var_name)
2481 {
2482 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2483 		return false;
2484 
2485 	return true;
2486 }
2487 
2488 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2489 				 char *var_name)
2490 {
2491 	char *name, *field;
2492 	unsigned int i;
2493 
2494 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2495 		name = hist_data->attrs->var_defs.name[i];
2496 
2497 		if (strcmp(var_name, name) == 0) {
2498 			field = hist_data->attrs->var_defs.expr[i];
2499 			if (contains_operator(field) || is_var_ref(field))
2500 				continue;
2501 			return field;
2502 		}
2503 	}
2504 
2505 	return NULL;
2506 }
2507 
2508 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2509 				 char *system, char *event_name,
2510 				 char *var_name)
2511 {
2512 	struct trace_event_call *call;
2513 
2514 	if (system && event_name) {
2515 		call = hist_data->event_file->event_call;
2516 
2517 		if (strcmp(system, call->class->system) != 0)
2518 			return NULL;
2519 
2520 		if (strcmp(event_name, trace_event_name(call)) != 0)
2521 			return NULL;
2522 	}
2523 
2524 	if (!!system != !!event_name)
2525 		return NULL;
2526 
2527 	if (!is_var_ref(var_name))
2528 		return NULL;
2529 
2530 	var_name++;
2531 
2532 	return field_name_from_var(hist_data, var_name);
2533 }
2534 
2535 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2536 					char *system, char *event_name,
2537 					char *var_name)
2538 {
2539 	struct hist_field *var_field = NULL, *ref_field = NULL;
2540 
2541 	if (!is_var_ref(var_name))
2542 		return NULL;
2543 
2544 	var_name++;
2545 
2546 	var_field = find_event_var(hist_data, system, event_name, var_name);
2547 	if (var_field)
2548 		ref_field = create_var_ref(hist_data, var_field,
2549 					   system, event_name);
2550 
2551 	if (!ref_field)
2552 		hist_err_event("Couldn't find variable: $",
2553 			       system, event_name, var_name);
2554 
2555 	return ref_field;
2556 }
2557 
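/*
 * Resolve a "field" or "field.modifier" string (e.g. "common_pid.execname")
 * to the event's ftrace_event_field, setting the corresponding
 * HIST_FIELD_FL_* flag for any modifier.  "common_timestamp" and "cpu"
 * don't map to event fields and are handled purely via flags.
 */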
2558 static struct ftrace_event_field *
2559 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2560 	    char *field_str, unsigned long *flags)
2561 {
2562 	struct ftrace_event_field *field = NULL;
2563 	char *field_name, *modifier, *str;
2564 
2565 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2566 	if (!modifier)
2567 		return ERR_PTR(-ENOMEM);
2568 
2569 	field_name = strsep(&modifier, ".");
2570 	if (modifier) {
2571 		if (strcmp(modifier, "hex") == 0)
2572 			*flags |= HIST_FIELD_FL_HEX;
2573 		else if (strcmp(modifier, "sym") == 0)
2574 			*flags |= HIST_FIELD_FL_SYM;
2575 		else if (strcmp(modifier, "sym-offset") == 0)
2576 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2577 		else if ((strcmp(modifier, "execname") == 0) &&
2578 			 (strcmp(field_name, "common_pid") == 0))
2579 			*flags |= HIST_FIELD_FL_EXECNAME;
2580 		else if (strcmp(modifier, "syscall") == 0)
2581 			*flags |= HIST_FIELD_FL_SYSCALL;
2582 		else if (strcmp(modifier, "log2") == 0)
2583 			*flags |= HIST_FIELD_FL_LOG2;
2584 		else if (strcmp(modifier, "usecs") == 0)
2585 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2586 		else {
2587 			hist_err("Invalid field modifier: ", modifier);
2588 			field = ERR_PTR(-EINVAL);
2589 			goto out;
2590 		}
2591 	}
2592 
2593 	if (strcmp(field_name, "common_timestamp") == 0) {
2594 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2595 		hist_data->enable_timestamps = true;
2596 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2597 			hist_data->attrs->ts_in_usecs = true;
2598 	} else if (strcmp(field_name, "cpu") == 0)
2599 		*flags |= HIST_FIELD_FL_CPU;
2600 	else {
2601 		field = trace_find_event_field(file->event_call, field_name);
2602 		if (!field || !field->size) {
2603 			hist_err("Couldn't find field: ", field_name);
2604 			field = ERR_PTR(-EINVAL);
2605 			goto out;
2606 		}
2607 	}
2608  out:
2609 	kfree(str);
2610 
2611 	return field;
2612 }
2613 
2614 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2615 				       struct hist_field *var_ref,
2616 				       char *var_name)
2617 {
2618 	struct hist_field *alias = NULL;
2619 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2620 
2621 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2622 	if (!alias)
2623 		return NULL;
2624 
2625 	alias->fn = var_ref->fn;
2626 	alias->operands[0] = var_ref;
2627 
2628 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2629 		destroy_hist_field(alias, 0);
2630 		return NULL;
2631 	}
2632 
2633 	return alias;
2634 }
2635 
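/*
 * Parse a single operand of an expression: either a variable reference
 * such as "$ts0" or "sys.event.$var", or a plain event field, and create
 * the corresponding hist_field.  A reference that is itself being
 * assigned to a variable (var_name) is wrapped in an alias field.
 */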
2636 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2637 				     struct trace_event_file *file, char *str,
2638 				     unsigned long *flags, char *var_name)
2639 {
2640 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2641 	struct ftrace_event_field *field = NULL;
2642 	struct hist_field *hist_field = NULL;
2643 	int ret = 0;
2644 
2645 	s = strchr(str, '.');
2646 	if (s) {
2647 		s = strchr(++s, '.');
2648 		if (s) {
2649 			ref_system = strsep(&str, ".");
2650 			if (!str) {
2651 				ret = -EINVAL;
2652 				goto out;
2653 			}
2654 			ref_event = strsep(&str, ".");
2655 			if (!str) {
2656 				ret = -EINVAL;
2657 				goto out;
2658 			}
2659 			ref_var = str;
2660 		}
2661 	}
2662 
2663 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2664 	if (!s) {
2665 		hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var);
2666 		if (hist_field) {
2667 			if (var_name) {
2668 				hist_field = create_alias(hist_data, hist_field, var_name);
2669 				if (!hist_field) {
2670 					ret = -ENOMEM;
2671 					goto out;
2672 				}
2673 			}
2674 			return hist_field;
2675 		}
2676 	} else
2677 		str = s;
2678 
2679 	field = parse_field(hist_data, file, str, flags);
2680 	if (IS_ERR(field)) {
2681 		ret = PTR_ERR(field);
2682 		goto out;
2683 	}
2684 
2685 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2686 	if (!hist_field) {
2687 		ret = -ENOMEM;
2688 		goto out;
2689 	}
2690 
2691 	return hist_field;
2692  out:
2693 	return ERR_PTR(ret);
2694 }
2695 
2696 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2697 				     struct trace_event_file *file,
2698 				     char *str, unsigned long flags,
2699 				     char *var_name, unsigned int level);
2700 
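/*
 * Parse a unary minus expression.  The operand is parsed recursively via
 * parse_expr() and the result is an EXPR field whose fn negates the
 * operand's value at event time (hist_field_unary_minus).
 */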
2701 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2702 				      struct trace_event_file *file,
2703 				      char *str, unsigned long flags,
2704 				      char *var_name, unsigned int level)
2705 {
2706 	struct hist_field *operand1, *expr = NULL;
2707 	unsigned long operand_flags;
2708 	int ret = 0;
2709 	char *s;
2710 
2711 	/* we support only -(xxx) i.e. explicit parens required */
2712 
2713 	if (level > 3) {
2714 		hist_err("Too many subexpressions (3 max): ", str);
2715 		ret = -EINVAL;
2716 		goto free;
2717 	}
2718 
2719 	str++; /* skip leading '-' */
2720 
2721 	s = strchr(str, '(');
2722 	if (s)
2723 		str++;
2724 	else {
2725 		ret = -EINVAL;
2726 		goto free;
2727 	}
2728 
2729 	s = strrchr(str, ')');
2730 	if (s)
2731 		*s = '\0';
2732 	else {
2733 		ret = -EINVAL; /* no closing ')' */
2734 		goto free;
2735 	}
2736 
2737 	flags |= HIST_FIELD_FL_EXPR;
2738 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2739 	if (!expr) {
2740 		ret = -ENOMEM;
2741 		goto free;
2742 	}
2743 
2744 	operand_flags = 0;
2745 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2746 	if (IS_ERR(operand1)) {
2747 		ret = PTR_ERR(operand1);
2748 		goto free;
2749 	}
2750 
2751 	expr->flags |= operand1->flags &
2752 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2753 	expr->fn = hist_field_unary_minus;
2754 	expr->operands[0] = operand1;
2755 	expr->operator = FIELD_OP_UNARY_MINUS;
2756 	expr->name = expr_str(expr, 0);
2757 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2758 	if (!expr->type) {
2759 		ret = -ENOMEM;
2760 		goto free;
2761 	}
2762 
2763 	return expr;
2764  free:
2765 	destroy_hist_field(expr, 0);
2766 	return ERR_PTR(ret);
2767 }
2768 
2769 static int check_expr_operands(struct hist_field *operand1,
2770 			       struct hist_field *operand2)
2771 {
2772 	unsigned long operand1_flags = operand1->flags;
2773 	unsigned long operand2_flags = operand2->flags;
2774 
2775 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2776 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2777 		struct hist_field *var;
2778 
2779 		var = find_var_field(operand1->var.hist_data, operand1->name);
2780 		if (!var)
2781 			return -EINVAL;
2782 		operand1_flags = var->flags;
2783 	}
2784 
2785 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2786 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2787 		struct hist_field *var;
2788 
2789 		var = find_var_field(operand2->var.hist_data, operand2->name);
2790 		if (!var)
2791 			return -EINVAL;
2792 		operand2_flags = var->flags;
2793 	}
2794 
2795 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2796 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2797 		hist_err("Timestamp units in expression don't match", NULL);
2798 		return -EINVAL;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
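/*
 * Recursive-descent entry point for expressions such as
 * "common_timestamp.usecs-$ts0" or "-(a+b)".  Only '+' and '-' are
 * supported, the right-hand operand may itself be an expression
 * (making the grammar right-associative), and at most three levels
 * of nesting are allowed.
 */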
2804 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2805 				     struct trace_event_file *file,
2806 				     char *str, unsigned long flags,
2807 				     char *var_name, unsigned int level)
2808 {
2809 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2810 	unsigned long operand_flags;
2811 	int field_op, ret = -EINVAL;
2812 	char *sep, *operand1_str;
2813 
2814 	if (level > 3) {
2815 		hist_err("Too many subexpressions (3 max): ", str);
2816 		return ERR_PTR(-EINVAL);
2817 	}
2818 
2819 	field_op = contains_operator(str);
2820 
2821 	if (field_op == FIELD_OP_NONE)
2822 		return parse_atom(hist_data, file, str, &flags, var_name);
2823 
2824 	if (field_op == FIELD_OP_UNARY_MINUS)
2825 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
2826 
2827 	switch (field_op) {
2828 	case FIELD_OP_MINUS:
2829 		sep = "-";
2830 		break;
2831 	case FIELD_OP_PLUS:
2832 		sep = "+";
2833 		break;
2834 	default:
2835 		goto free;
2836 	}
2837 
2838 	operand1_str = strsep(&str, sep);
2839 	if (!operand1_str || !str)
2840 		goto free;
2841 
2842 	operand_flags = 0;
2843 	operand1 = parse_atom(hist_data, file, operand1_str,
2844 			      &operand_flags, NULL);
2845 	if (IS_ERR(operand1)) {
2846 		ret = PTR_ERR(operand1);
2847 		operand1 = NULL;
2848 		goto free;
2849 	}
2850 
2851 	/* rest of string could be another expression e.g. b+c in a+b+c */
2852 	operand_flags = 0;
2853 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2854 	if (IS_ERR(operand2)) {
2855 		ret = PTR_ERR(operand2);
2856 		operand2 = NULL;
2857 		goto free;
2858 	}
2859 
2860 	ret = check_expr_operands(operand1, operand2);
2861 	if (ret)
2862 		goto free;
2863 
2864 	flags |= HIST_FIELD_FL_EXPR;
2865 
2866 	flags |= operand1->flags &
2867 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2868 
2869 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2870 	if (!expr) {
2871 		ret = -ENOMEM;
2872 		goto free;
2873 	}
2874 
2875 	operand1->read_once = true;
2876 	operand2->read_once = true;
2877 
2878 	expr->operands[0] = operand1;
2879 	expr->operands[1] = operand2;
2880 	expr->operator = field_op;
2881 	expr->name = expr_str(expr, 0);
2882 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2883 	if (!expr->type) {
2884 		ret = -ENOMEM;
2885 		goto free;
2886 	}
2887 
2888 	switch (field_op) {
2889 	case FIELD_OP_MINUS:
2890 		expr->fn = hist_field_minus;
2891 		break;
2892 	case FIELD_OP_PLUS:
2893 		expr->fn = hist_field_plus;
2894 		break;
2895 	default:
2896 		ret = -EINVAL;
2897 		goto free;
2898 	}
2899 
2900 	return expr;
2901  free:
2902 	destroy_hist_field(operand1, 0);
2903 	destroy_hist_field(operand2, 0);
2904 	destroy_hist_field(expr, 0);
2905 
2906 	return ERR_PTR(ret);
2907 }
2908 
2909 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
2910 				 struct trace_event_file *file)
2911 {
2912 	struct event_trigger_data *test;
2913 
2914 	list_for_each_entry_rcu(test, &file->triggers, list) {
2915 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2916 			if (test->private_data == hist_data)
2917 				return test->filter_str;
2918 		}
2919 	}
2920 
2921 	return NULL;
2922 }
2923 
2924 static struct event_command trigger_hist_cmd;
2925 static int event_hist_trigger_func(struct event_command *cmd_ops,
2926 				   struct trace_event_file *file,
2927 				   char *glob, char *cmd, char *param);
2928 
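/*
 * Check whether two hist triggers have compatible keys, i.e. the same
 * number of keys with matching type, size and signedness, so that one
 * can host a field variable on behalf of the other.
 */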
2929 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
2930 			    struct hist_trigger_data *hist_data,
2931 			    unsigned int n_keys)
2932 {
2933 	struct hist_field *target_hist_field, *hist_field;
2934 	unsigned int n, i, j;
2935 
2936 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
2937 		return false;
2938 
2939 	i = hist_data->n_vals;
2940 	j = target_hist_data->n_vals;
2941 
2942 	for (n = 0; n < n_keys; n++) {
2943 		hist_field = hist_data->fields[i + n];
2944 		target_hist_field = target_hist_data->fields[j + n];
2945 
2946 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
2947 			return false;
2948 		if (hist_field->size != target_hist_field->size)
2949 			return false;
2950 		if (hist_field->is_signed != target_hist_field->is_signed)
2951 			return false;
2952 	}
2953 
2954 	return true;
2955 }
2956 
2957 static struct hist_trigger_data *
2958 find_compatible_hist(struct hist_trigger_data *target_hist_data,
2959 		     struct trace_event_file *file)
2960 {
2961 	struct hist_trigger_data *hist_data;
2962 	struct event_trigger_data *test;
2963 	unsigned int n_keys;
2964 
2965 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
2966 
2967 	list_for_each_entry_rcu(test, &file->triggers, list) {
2968 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2969 			hist_data = test->private_data;
2970 
2971 			if (compatible_keys(target_hist_data, hist_data, n_keys))
2972 				return hist_data;
2973 		}
2974 	}
2975 
2976 	return NULL;
2977 }
2978 
2979 static struct trace_event_file *event_file(struct trace_array *tr,
2980 					   char *system, char *event_name)
2981 {
2982 	struct trace_event_file *file;
2983 
2984 	file = __find_event_file(tr, system, event_name);
2985 	if (!file)
2986 		return ERR_PTR(-EINVAL);
2987 
2988 	return file;
2989 }
2990 
2991 static struct hist_field *
2992 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
2993 			 char *system, char *event_name, char *field_name)
2994 {
2995 	struct hist_field *event_var;
2996 	char *synthetic_name;
2997 
2998 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2999 	if (!synthetic_name)
3000 		return ERR_PTR(-ENOMEM);
3001 
3002 	strcpy(synthetic_name, "synthetic_");
3003 	strcat(synthetic_name, field_name);
3004 
3005 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3006 
3007 	kfree(synthetic_name);
3008 
3009 	return event_var;
3010 }
3011 
3012 /**
3013  * create_field_var_hist - Automatically create a histogram and var for a field
3014  * @target_hist_data: The target hist trigger
3015  * @subsys_name: Optional subsystem name
3016  * @event_name: Optional event name
3017  * @field_name: The name of the field (and the resulting variable)
3018  *
3019  * Hist trigger actions fetch data from variables, not directly from
3020  * events.  However, for convenience, users are allowed to directly
3021  * specify an event field in an action, which will be automatically
3022  * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram is currently being defined on (the target event histogram), the
3026  * only way that can be accomplished is if a new hist trigger is
3027  * created and the field variable defined on that.
3028  *
3029  * This function creates a new histogram compatible with the target
3030  * event (meaning a histogram with the same key as the target
3031  * histogram), and creates a variable for the specified field, but
3032  * with 'synthetic_' prepended to the variable name in order to avoid
3033  * collision with normal field variables.
3034  *
3035  * Return: The variable created for the field.
3036  */
3037 static struct hist_field *
3038 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3039 		      char *subsys_name, char *event_name, char *field_name)
3040 {
3041 	struct trace_array *tr = target_hist_data->event_file->tr;
3042 	struct hist_field *event_var = ERR_PTR(-EINVAL);
3043 	struct hist_trigger_data *hist_data;
	unsigned int i, n;
	bool first = true;
3045 	struct field_var_hist *var_hist;
3046 	struct trace_event_file *file;
3047 	struct hist_field *key_field;
3048 	char *saved_filter;
3049 	char *cmd;
3050 	int ret;
3051 
3052 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3053 		hist_err_event("onmatch: Too many field variables defined: ",
3054 			       subsys_name, event_name, field_name);
3055 		return ERR_PTR(-EINVAL);
3056 	}
3057 
3058 	file = event_file(tr, subsys_name, event_name);
3059 
3060 	if (IS_ERR(file)) {
3061 		hist_err_event("onmatch: Event file not found: ",
3062 			       subsys_name, event_name, field_name);
3063 		ret = PTR_ERR(file);
3064 		return ERR_PTR(ret);
3065 	}
3066 
3067 	/*
3068 	 * Look for a histogram compatible with target.  We'll use the
3069 	 * found histogram specification to create a new matching
3070 	 * histogram with our variable on it.  target_hist_data is not
3071 	 * yet a registered histogram so we can't use that.
3072 	 */
3073 	hist_data = find_compatible_hist(target_hist_data, file);
3074 	if (!hist_data) {
3075 		hist_err_event("onmatch: Matching event histogram not found: ",
3076 			       subsys_name, event_name, field_name);
3077 		return ERR_PTR(-EINVAL);
3078 	}
3079 
3080 	/* See if a synthetic field variable has already been created */
3081 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3082 					     event_name, field_name);
3083 	if (!IS_ERR_OR_NULL(event_var))
3084 		return event_var;
3085 
3086 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3087 	if (!var_hist)
3088 		return ERR_PTR(-ENOMEM);
3089 
3090 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3091 	if (!cmd) {
3092 		kfree(var_hist);
3093 		return ERR_PTR(-ENOMEM);
3094 	}
3095 
3096 	/* Use the same keys as the compatible histogram */
3097 	strcat(cmd, "keys=");
3098 
3099 	for_each_hist_key_field(i, hist_data) {
3100 		key_field = hist_data->fields[i];
3101 		if (!first)
3102 			strcat(cmd, ",");
3103 		strcat(cmd, key_field->field->name);
3104 		first = false;
3105 	}
3106 
3107 	/* Create the synthetic field variable specification */
3108 	strcat(cmd, ":synthetic_");
3109 	strcat(cmd, field_name);
3110 	strcat(cmd, "=");
3111 	strcat(cmd, field_name);
3112 
3113 	/* Use the same filter as the compatible histogram */
3114 	saved_filter = find_trigger_filter(hist_data, file);
3115 	if (saved_filter) {
3116 		strcat(cmd, " if ");
3117 		strcat(cmd, saved_filter);
3118 	}
3119 
3120 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3121 	if (!var_hist->cmd) {
3122 		kfree(cmd);
3123 		kfree(var_hist);
3124 		return ERR_PTR(-ENOMEM);
3125 	}
3126 
3127 	/* Save the compatible histogram information */
3128 	var_hist->hist_data = hist_data;
3129 
3130 	/* Create the new histogram with our variable */
3131 	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3132 				      "", "hist", cmd);
3133 	if (ret) {
3134 		kfree(cmd);
3135 		kfree(var_hist->cmd);
3136 		kfree(var_hist);
3137 		hist_err_event("onmatch: Couldn't create histogram for field: ",
3138 			       subsys_name, event_name, field_name);
3139 		return ERR_PTR(ret);
3140 	}
3141 
3142 	kfree(cmd);
3143 
3144 	/* If we can't find the variable, something went wrong */
3145 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3146 					     event_name, field_name);
3147 	if (IS_ERR_OR_NULL(event_var)) {
3148 		kfree(var_hist->cmd);
3149 		kfree(var_hist);
3150 		hist_err_event("onmatch: Couldn't find synthetic variable: ",
3151 			       subsys_name, event_name, field_name);
3152 		return ERR_PTR(-EINVAL);
3153 	}
3154 
3155 	n = target_hist_data->n_field_var_hists;
3156 	target_hist_data->field_var_hists[n] = var_hist;
3157 	target_hist_data->n_field_var_hists++;
3158 
3159 	return event_var;
3160 }
3161 
3162 static struct hist_field *
3163 find_target_event_var(struct hist_trigger_data *hist_data,
3164 		      char *subsys_name, char *event_name, char *var_name)
3165 {
3166 	struct trace_event_file *file = hist_data->event_file;
3167 	struct hist_field *hist_field = NULL;
3168 
3169 	if (subsys_name) {
3170 		struct trace_event_call *call;
3171 
3172 		if (!event_name)
3173 			return NULL;
3174 
3175 		call = file->event_call;
3176 
3177 		if (strcmp(subsys_name, call->class->system) != 0)
3178 			return NULL;
3179 
3180 		if (strcmp(event_name, trace_event_name(call)) != 0)
3181 			return NULL;
3182 	}
3183 
3184 	hist_field = find_var_field(hist_data, var_name);
3185 
3186 	return hist_field;
3187 }
3188 
3189 static inline void __update_field_vars(struct tracing_map_elt *elt,
3190 				       struct ring_buffer_event *rbe,
3191 				       void *rec,
3192 				       struct field_var **field_vars,
3193 				       unsigned int n_field_vars,
3194 				       unsigned int field_var_str_start)
3195 {
3196 	struct hist_elt_data *elt_data = elt->private_data;
3197 	unsigned int i, j, var_idx;
3198 	u64 var_val;
3199 
3200 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3201 		struct field_var *field_var = field_vars[i];
3202 		struct hist_field *var = field_var->var;
3203 		struct hist_field *val = field_var->val;
3204 
3205 		var_val = val->fn(val, elt, rbe, rec);
3206 		var_idx = var->var.idx;
3207 
3208 		if (val->flags & HIST_FIELD_FL_STRING) {
3209 			char *str = elt_data->field_var_str[j++];
3210 			char *val_str = (char *)(uintptr_t)var_val;
3211 
3212 			strscpy(str, val_str, STR_VAR_LEN_MAX);
3213 			var_val = (u64)(uintptr_t)str;
3214 		}
3215 		tracing_map_set_var(elt, var_idx, var_val);
3216 	}
3217 }
3218 
3219 static void update_field_vars(struct hist_trigger_data *hist_data,
3220 			      struct tracing_map_elt *elt,
3221 			      struct ring_buffer_event *rbe,
3222 			      void *rec)
3223 {
3224 	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
3225 			    hist_data->n_field_vars, 0);
3226 }
3227 
3228 static void update_max_vars(struct hist_trigger_data *hist_data,
3229 			    struct tracing_map_elt *elt,
3230 			    struct ring_buffer_event *rbe,
3231 			    void *rec)
3232 {
3233 	__update_field_vars(elt, rbe, rec, hist_data->max_vars,
3234 			    hist_data->n_max_vars, hist_data->n_field_var_str);
3235 }
3236 
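/*
 * Create a named variable backed by a slot in the trigger's tracing_map.
 * Fails if a variable with the same name is already defined on this
 * event (unless the trigger is being removed).
 */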
3237 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3238 				     struct trace_event_file *file,
3239 				     char *name, int size, const char *type)
3240 {
3241 	struct hist_field *var;
3242 	int idx;
3243 
3244 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3245 		var = ERR_PTR(-EINVAL);
3246 		goto out;
3247 	}
3248 
3249 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3250 	if (!var) {
3251 		var = ERR_PTR(-ENOMEM);
3252 		goto out;
3253 	}
3254 
3255 	idx = tracing_map_add_var(hist_data->map);
3256 	if (idx < 0) {
3257 		kfree(var);
3258 		var = ERR_PTR(-EINVAL);
3259 		goto out;
3260 	}
3261 
3262 	var->flags = HIST_FIELD_FL_VAR;
3263 	var->var.idx = idx;
3264 	var->var.hist_data = var->hist_data = hist_data;
3265 	var->size = size;
3266 	var->var.name = kstrdup(name, GFP_KERNEL);
3267 	var->type = kstrdup(type, GFP_KERNEL);
3268 	if (!var->var.name || !var->type) {
3269 		kfree(var->var.name);
3270 		kfree(var->type);
3271 		kfree(var);
3272 		var = ERR_PTR(-ENOMEM);
3273 	}
3274  out:
3275 	return var;
3276 }
3277 
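/*
 * Create a "field variable": a variable automatically generated for an
 * event field named in an action, pairing the variable (var) with the
 * hist_field used to read the field's value (val) at event time.
 */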
3278 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3279 					  struct trace_event_file *file,
3280 					  char *field_name)
3281 {
3282 	struct hist_field *val = NULL, *var = NULL;
3283 	unsigned long flags = HIST_FIELD_FL_VAR;
3284 	struct field_var *field_var;
3285 	int ret = 0;
3286 
3287 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3288 		hist_err("Too many field variables defined: ", field_name);
3289 		ret = -EINVAL;
3290 		goto err;
3291 	}
3292 
3293 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3294 	if (IS_ERR(val)) {
3295 		hist_err("Couldn't parse field variable: ", field_name);
3296 		ret = PTR_ERR(val);
3297 		goto err;
3298 	}
3299 
3300 	var = create_var(hist_data, file, field_name, val->size, val->type);
3301 	if (IS_ERR(var)) {
3302 		hist_err("Couldn't create or find variable: ", field_name);
		destroy_hist_field(val, 0);
3304 		ret = PTR_ERR(var);
3305 		goto err;
3306 	}
3307 
3308 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3309 	if (!field_var) {
		destroy_hist_field(val, 0);
		destroy_hist_field(var, 0);
		ret = -ENOMEM;
3313 		goto err;
3314 	}
3315 
3316 	field_var->var = var;
3317 	field_var->val = val;
3318  out:
3319 	return field_var;
3320  err:
3321 	field_var = ERR_PTR(ret);
3322 	goto out;
3323 }
3324 
3325 /**
3326  * create_target_field_var - Automatically create a variable for a field
3327  * @target_hist_data: The target hist trigger
3328  * @subsys_name: Optional subsystem name
3329  * @event_name: Optional event name
3330  * @var_name: The name of the field (and the resulting variable)
3331  *
3332  * Hist trigger actions fetch data from variables, not directly from
3333  * events.  However, for convenience, users are allowed to directly
3334  * specify an event field in an action, which will be automatically
3335  * converted into a variable on their behalf.
 *
3337  * This function creates a field variable with the name var_name on
3338  * the hist trigger currently being defined on the target event.  If
3339  * subsys_name and event_name are specified, this function simply
3340  * verifies that they do in fact match the target event subsystem and
3341  * event name.
3342  *
3343  * Return: The variable created for the field.
3344  */
3345 static struct field_var *
3346 create_target_field_var(struct hist_trigger_data *target_hist_data,
3347 			char *subsys_name, char *event_name, char *var_name)
3348 {
3349 	struct trace_event_file *file = target_hist_data->event_file;
3350 
3351 	if (subsys_name) {
3352 		struct trace_event_call *call;
3353 
3354 		if (!event_name)
3355 			return NULL;
3356 
3357 		call = file->event_call;
3358 
3359 		if (strcmp(subsys_name, call->class->system) != 0)
3360 			return NULL;
3361 
3362 		if (strcmp(event_name, trace_event_name(call)) != 0)
3363 			return NULL;
3364 	}
3365 
3366 	return create_field_var(target_hist_data, file, var_name);
3367 }
3368 
3369 static void onmax_print(struct seq_file *m,
3370 			struct hist_trigger_data *hist_data,
3371 			struct tracing_map_elt *elt,
3372 			struct action_data *data)
3373 {
3374 	unsigned int i, save_var_idx, max_idx = data->onmax.max_var->var.idx;
3375 
3376 	seq_printf(m, "\n\tmax: %10llu", tracing_map_read_var(elt, max_idx));
3377 
3378 	for (i = 0; i < hist_data->n_max_vars; i++) {
3379 		struct hist_field *save_val = hist_data->max_vars[i]->val;
3380 		struct hist_field *save_var = hist_data->max_vars[i]->var;
3381 		u64 val;
3382 
3383 		save_var_idx = save_var->var.idx;
3384 
3385 		val = tracing_map_read_var(elt, save_var_idx);
3386 
3387 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3388 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3389 				   (char *)(uintptr_t)(val));
3390 		} else
3391 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3392 	}
3393 }
3394 
3395 static void onmax_save(struct hist_trigger_data *hist_data,
3396 		       struct tracing_map_elt *elt, void *rec,
3397 		       struct ring_buffer_event *rbe,
3398 		       struct action_data *data, u64 *var_ref_vals)
3399 {
3400 	unsigned int max_idx = data->onmax.max_var->var.idx;
3401 	unsigned int max_var_ref_idx = data->onmax.max_var_ref_idx;
3402 
3403 	u64 var_val, max_val;
3404 
3405 	var_val = var_ref_vals[max_var_ref_idx];
3406 	max_val = tracing_map_read_var(elt, max_idx);
3407 
3408 	if (var_val <= max_val)
3409 		return;
3410 
3411 	tracing_map_set_var(elt, max_idx, var_val);
3412 
3413 	update_max_vars(hist_data, elt, rbe, rec);
3414 }
3415 
3416 static void onmax_destroy(struct action_data *data)
3417 {
3418 	unsigned int i;
3419 
3420 	destroy_hist_field(data->onmax.max_var, 0);
3421 	destroy_hist_field(data->onmax.var, 0);
3422 
3423 	kfree(data->onmax.var_str);
3424 	kfree(data->onmax.fn_name);
3425 
3426 	for (i = 0; i < data->n_params; i++)
3427 		kfree(data->params[i]);
3428 
3429 	kfree(data);
3430 }
3431 
3432 static int onmax_create(struct hist_trigger_data *hist_data,
3433 			struct action_data *data)
3434 {
3435 	struct trace_event_file *file = hist_data->event_file;
3436 	struct hist_field *var_field, *ref_field, *max_var;
3437 	unsigned int var_ref_idx = hist_data->n_var_refs;
3438 	struct field_var *field_var;
3439 	char *onmax_var_str, *param;
3440 	unsigned int i;
3441 	int ret = 0;
3442 
3443 	onmax_var_str = data->onmax.var_str;
3444 	if (onmax_var_str[0] != '$') {
3445 		hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
3446 		return -EINVAL;
3447 	}
3448 	onmax_var_str++;
3449 
3450 	var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
3451 	if (!var_field) {
3452 		hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
3453 		return -EINVAL;
3454 	}
3455 
3456 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3457 	if (!ref_field)
3458 		return -ENOMEM;
3459 
3460 	data->onmax.var = ref_field;
3461 
3462 	data->fn = onmax_save;
3463 	data->onmax.max_var_ref_idx = var_ref_idx;
3464 	max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
3465 	if (IS_ERR(max_var)) {
3466 		hist_err("onmax: Couldn't create onmax variable: ", "max");
3467 		ret = PTR_ERR(max_var);
3468 		goto out;
3469 	}
3470 	data->onmax.max_var = max_var;
3471 
3472 	for (i = 0; i < data->n_params; i++) {
3473 		param = kstrdup(data->params[i], GFP_KERNEL);
3474 		if (!param) {
3475 			ret = -ENOMEM;
3476 			goto out;
3477 		}
3478 
3479 		field_var = create_target_field_var(hist_data, NULL, NULL, param);
3480 		if (IS_ERR(field_var)) {
3481 			hist_err("onmax: Couldn't create field variable: ", param);
3482 			ret = PTR_ERR(field_var);
3483 			kfree(param);
3484 			goto out;
3485 		}
3486 
3487 		hist_data->max_vars[hist_data->n_max_vars++] = field_var;
3488 		if (field_var->val->flags & HIST_FIELD_FL_STRING)
3489 			hist_data->n_max_var_str++;
3490 
3491 		kfree(param);
3492 	}
3493  out:
3494 	return ret;
3495 }
3496 
3497 static int parse_action_params(char *params, struct action_data *data)
3498 {
3499 	char *param, *saved_param;
3500 	int ret = 0;
3501 
3502 	while (params) {
3503 		if (data->n_params >= SYNTH_FIELDS_MAX)
3504 			goto out;
3505 
3506 		param = strsep(&params, ",");
3507 		if (!param) {
3508 			ret = -EINVAL;
3509 			goto out;
3510 		}
3511 
3512 		param = strstrip(param);
3513 		if (strlen(param) < 2) {
3514 			hist_err("Invalid action param: ", param);
3515 			ret = -EINVAL;
3516 			goto out;
3517 		}
3518 
3519 		saved_param = kstrdup(param, GFP_KERNEL);
3520 		if (!saved_param) {
3521 			ret = -ENOMEM;
3522 			goto out;
3523 		}
3524 
3525 		data->params[data->n_params++] = saved_param;
3526 	}
3527  out:
3528 	return ret;
3529 }
3530 
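/*
 * Parse the tail of an onmax() action, e.g. for
 * "onmax($wakeup_lat).save(next_comm,next_prio)" the string passed in
 * starts just past "onmax(".  Only the save() handler is currently
 * supported.
 */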
3531 static struct action_data *onmax_parse(char *str)
3532 {
3533 	char *onmax_fn_name, *onmax_var_str;
3534 	struct action_data *data;
3535 	int ret = -EINVAL;
3536 
3537 	data = kzalloc(sizeof(*data), GFP_KERNEL);
3538 	if (!data)
3539 		return ERR_PTR(-ENOMEM);
3540 
3541 	onmax_var_str = strsep(&str, ")");
3542 	if (!onmax_var_str || !str) {
3543 		ret = -EINVAL;
3544 		goto free;
3545 	}
3546 
3547 	data->onmax.var_str = kstrdup(onmax_var_str, GFP_KERNEL);
3548 	if (!data->onmax.var_str) {
3549 		ret = -ENOMEM;
3550 		goto free;
3551 	}
3552 
3553 	strsep(&str, ".");
3554 	if (!str)
3555 		goto free;
3556 
3557 	onmax_fn_name = strsep(&str, "(");
3558 	if (!onmax_fn_name || !str)
3559 		goto free;
3560 
3561 	if (str_has_prefix(onmax_fn_name, "save")) {
3562 		char *params = strsep(&str, ")");
3563 
3564 		if (!params) {
3565 			ret = -EINVAL;
3566 			goto free;
3567 		}
3568 
3569 		ret = parse_action_params(params, data);
3570 		if (ret)
3571 			goto free;
3572 	} else
3573 		goto free;
3574 
3575 	data->onmax.fn_name = kstrdup(onmax_fn_name, GFP_KERNEL);
3576 	if (!data->onmax.fn_name) {
3577 		ret = -ENOMEM;
3578 		goto free;
3579 	}
3580  out:
3581 	return data;
3582  free:
3583 	onmax_destroy(data);
3584 	data = ERR_PTR(ret);
3585 	goto out;
3586 }
3587 
3588 static void onmatch_destroy(struct action_data *data)
3589 {
3590 	unsigned int i;
3591 
3592 	lockdep_assert_held(&event_mutex);
3593 
3594 	kfree(data->onmatch.match_event);
3595 	kfree(data->onmatch.match_event_system);
3596 	kfree(data->onmatch.synth_event_name);
3597 
3598 	for (i = 0; i < data->n_params; i++)
3599 		kfree(data->params[i]);
3600 
3601 	if (data->onmatch.synth_event)
3602 		data->onmatch.synth_event->ref--;
3603 
3604 	kfree(data);
3605 }
3606 
3607 static void destroy_field_var(struct field_var *field_var)
3608 {
3609 	if (!field_var)
3610 		return;
3611 
3612 	destroy_hist_field(field_var->var, 0);
3613 	destroy_hist_field(field_var->val, 0);
3614 
3615 	kfree(field_var);
3616 }
3617 
3618 static void destroy_field_vars(struct hist_trigger_data *hist_data)
3619 {
3620 	unsigned int i;
3621 
3622 	for (i = 0; i < hist_data->n_field_vars; i++)
3623 		destroy_field_var(hist_data->field_vars[i]);
3624 }
3625 
3626 static void save_field_var(struct hist_trigger_data *hist_data,
3627 			   struct field_var *field_var)
3628 {
3629 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
3630 
3631 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
3632 		hist_data->n_field_var_str++;
3633 }
3634 
3636 static int check_synth_field(struct synth_event *event,
3637 			     struct hist_field *hist_field,
3638 			     unsigned int field_pos)
3639 {
3640 	struct synth_field *field;
3641 
3642 	if (field_pos >= event->n_fields)
3643 		return -EINVAL;
3644 
3645 	field = event->fields[field_pos];
3646 
3647 	if (strcmp(field->type, hist_field->type) != 0)
3648 		return -EINVAL;
3649 
3650 	return 0;
3651 }
3652 
3653 static struct hist_field *
3654 onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
3655 		 char *system, char *event, char *var)
3656 {
3657 	struct hist_field *hist_field;
3658 
3659 	var++; /* skip '$' */
3660 
3661 	hist_field = find_target_event_var(hist_data, system, event, var);
3662 	if (!hist_field) {
3663 		if (!system) {
3664 			system = data->onmatch.match_event_system;
3665 			event = data->onmatch.match_event;
3666 		}
3667 
3668 		hist_field = find_event_var(hist_data, system, event, var);
3669 	}
3670 
3671 	if (!hist_field)
3672 		hist_err_event("onmatch: Couldn't find onmatch param: $", system, event, var);
3673 
3674 	return hist_field;
3675 }
3676 
3677 static struct hist_field *
3678 onmatch_create_field_var(struct hist_trigger_data *hist_data,
3679 			 struct action_data *data, char *system,
3680 			 char *event, char *var)
3681 {
3682 	struct hist_field *hist_field = NULL;
3683 	struct field_var *field_var;
3684 
3685 	/*
	 * First try to create a field var on the target event (the
	 * event currently being defined).  This will create a variable for
3688 	 * unqualified fields on the target event, or if qualified,
3689 	 * target fields that have qualified names matching the target.
3690 	 */
3691 	field_var = create_target_field_var(hist_data, system, event, var);
3692 
3693 	if (field_var && !IS_ERR(field_var)) {
3694 		save_field_var(hist_data, field_var);
3695 		hist_field = field_var->var;
3696 	} else {
3697 		field_var = NULL;
3698 		/*
		 * If no explicit system.event is specified, default to
3700 		 * looking for fields on the onmatch(system.event.xxx)
3701 		 * event.
3702 		 */
3703 		if (!system) {
3704 			system = data->onmatch.match_event_system;
3705 			event = data->onmatch.match_event;
3706 		}
3707 
3708 		/*
3709 		 * At this point, we're looking at a field on another
3710 		 * event.  Because we can't modify a hist trigger on
3711 		 * another event to add a variable for a field, we need
3712 		 * to create a new trigger on that event and create the
3713 		 * variable at the same time.
3714 		 */
3715 		hist_field = create_field_var_hist(hist_data, system, event, var);
3716 		if (IS_ERR(hist_field))
3717 			goto free;
3718 	}
3719  out:
3720 	return hist_field;
3721  free:
3722 	destroy_field_var(field_var);
3723 	hist_field = NULL;
3724 	goto out;
3725 }
3726 
3727 static int onmatch_create(struct hist_trigger_data *hist_data,
3728 			  struct trace_event_file *file,
3729 			  struct action_data *data)
3730 {
3731 	char *event_name, *param, *system = NULL;
3732 	struct hist_field *hist_field, *var_ref;
3733 	unsigned int i, var_ref_idx;
3734 	unsigned int field_pos = 0;
3735 	struct synth_event *event;
3736 	int ret = 0;
3737 
3738 	lockdep_assert_held(&event_mutex);
3739 
3740 	event = find_synth_event(data->onmatch.synth_event_name);
3741 	if (!event) {
3742 		hist_err("onmatch: Couldn't find synthetic event: ", data->onmatch.synth_event_name);
3743 		return -EINVAL;
3744 	}
3745 	event->ref++;
3746 
3747 	var_ref_idx = hist_data->n_var_refs;
3748 
3749 	for (i = 0; i < data->n_params; i++) {
3750 		char *p;
3751 
3752 		p = param = kstrdup(data->params[i], GFP_KERNEL);
3753 		if (!param) {
3754 			ret = -ENOMEM;
3755 			goto err;
3756 		}
3757 
3758 		system = strsep(&param, ".");
3759 		if (!param) {
3760 			param = (char *)system;
3761 			system = event_name = NULL;
3762 		} else {
3763 			event_name = strsep(&param, ".");
3764 			if (!param) {
3765 				kfree(p);
3766 				ret = -EINVAL;
3767 				goto err;
3768 			}
3769 		}
3770 
3771 		if (param[0] == '$')
3772 			hist_field = onmatch_find_var(hist_data, data, system,
3773 						      event_name, param);
3774 		else
3775 			hist_field = onmatch_create_field_var(hist_data, data,
3776 							      system,
3777 							      event_name,
3778 							      param);
3779 
3780 		if (!hist_field) {
3781 			kfree(p);
3782 			ret = -EINVAL;
3783 			goto err;
3784 		}
3785 
3786 		if (check_synth_field(event, hist_field, field_pos) == 0) {
3787 			var_ref = create_var_ref(hist_data, hist_field,
3788 						 system, event_name);
3789 			if (!var_ref) {
3790 				kfree(p);
3791 				ret = -ENOMEM;
3792 				goto err;
3793 			}
3794 
3795 			field_pos++;
3796 			kfree(p);
3797 			continue;
3798 		}
3799 
3800 		hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
3801 			       system, event_name, param);
3802 		kfree(p);
3803 		ret = -EINVAL;
3804 		goto err;
3805 	}
3806 
3807 	if (field_pos != event->n_fields) {
3808 		hist_err("onmatch: Param count doesn't match synthetic event field count: ", event->name);
3809 		ret = -EINVAL;
3810 		goto err;
3811 	}
3812 
3813 	data->fn = action_trace;
3814 	data->onmatch.synth_event = event;
3815 	data->onmatch.var_ref_idx = var_ref_idx;
3816  out:
3817 	return ret;
3818  err:
3819 	event->ref--;
3820 
3821 	goto out;
3822 }
3823 
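/*
 * Parse the tail of an onmatch() action, e.g.
 * "onmatch(sched.sched_switch).wakeup_latency($wakeup_lat,next_pid)";
 * the string passed in starts just past "onmatch(".  The matching
 * system.event, the synthetic event name and its parameter list are
 * saved in the action_data for onmatch_create() to resolve later.
 */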
3824 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
3825 {
3826 	char *match_event, *match_event_system;
3827 	char *synth_event_name, *params;
3828 	struct action_data *data;
3829 	int ret = -EINVAL;
3830 
3831 	data = kzalloc(sizeof(*data), GFP_KERNEL);
3832 	if (!data)
3833 		return ERR_PTR(-ENOMEM);
3834 
3835 	match_event = strsep(&str, ")");
3836 	if (!match_event || !str) {
3837 		hist_err("onmatch: Missing closing paren: ", match_event);
3838 		goto free;
3839 	}
3840 
3841 	match_event_system = strsep(&match_event, ".");
3842 	if (!match_event) {
3843 		hist_err("onmatch: Missing subsystem for match event: ", match_event_system);
3844 		goto free;
3845 	}
3846 
3847 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
3848 		hist_err_event("onmatch: Invalid subsystem or event name: ",
3849 			       match_event_system, match_event, NULL);
3850 		goto free;
3851 	}
3852 
3853 	data->onmatch.match_event = kstrdup(match_event, GFP_KERNEL);
3854 	if (!data->onmatch.match_event) {
3855 		ret = -ENOMEM;
3856 		goto free;
3857 	}
3858 
3859 	data->onmatch.match_event_system = kstrdup(match_event_system, GFP_KERNEL);
3860 	if (!data->onmatch.match_event_system) {
3861 		ret = -ENOMEM;
3862 		goto free;
3863 	}
3864 
3865 	strsep(&str, ".");
3866 	if (!str) {
3867 		hist_err("onmatch: Missing . after onmatch(): ", str);
3868 		goto free;
3869 	}
3870 
3871 	synth_event_name = strsep(&str, "(");
3872 	if (!synth_event_name || !str) {
3873 		hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
3874 		goto free;
3875 	}
3876 
3877 	data->onmatch.synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
3878 	if (!data->onmatch.synth_event_name) {
3879 		ret = -ENOMEM;
3880 		goto free;
3881 	}
3882 
3883 	params = strsep(&str, ")");
	if (!params || !str || strlen(str)) {
3885 		hist_err("onmatch: Missing closing paramlist paren: ", params);
3886 		goto free;
3887 	}
3888 
3889 	ret = parse_action_params(params, data);
3890 	if (ret)
3891 		goto free;
3892  out:
3893 	return data;
3894  free:
3895 	onmatch_destroy(data);
3896 	data = ERR_PTR(ret);
3897 	goto out;
3898 }
3899 
3900 static int create_hitcount_val(struct hist_trigger_data *hist_data)
3901 {
3902 	hist_data->fields[HITCOUNT_IDX] =
3903 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
3904 	if (!hist_data->fields[HITCOUNT_IDX])
3905 		return -ENOMEM;
3906 
3907 	hist_data->n_vals++;
3908 	hist_data->n_fields++;
3909 
3910 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
3911 		return -EINVAL;
3912 
3913 	return 0;
3914 }
3915 
3916 static int __create_val_field(struct hist_trigger_data *hist_data,
3917 			      unsigned int val_idx,
3918 			      struct trace_event_file *file,
3919 			      char *var_name, char *field_str,
3920 			      unsigned long flags)
3921 {
3922 	struct hist_field *hist_field;
3923 	int ret = 0;
3924 
3925 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
3926 	if (IS_ERR(hist_field)) {
3927 		ret = PTR_ERR(hist_field);
3928 		goto out;
3929 	}
3930 
3931 	hist_data->fields[val_idx] = hist_field;
3932 
3933 	++hist_data->n_vals;
3934 	++hist_data->n_fields;
3935 
3936 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3937 		ret = -EINVAL;
3938  out:
3939 	return ret;
3940 }
3941 
3942 static int create_val_field(struct hist_trigger_data *hist_data,
3943 			    unsigned int val_idx,
3944 			    struct trace_event_file *file,
3945 			    char *field_str)
3946 {
3947 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
3948 		return -EINVAL;
3949 
3950 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
3951 }
3952 
3953 static int create_var_field(struct hist_trigger_data *hist_data,
3954 			    unsigned int val_idx,
3955 			    struct trace_event_file *file,
3956 			    char *var_name, char *expr_str)
3957 {
3958 	unsigned long flags = 0;
3959 
3960 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3961 		return -EINVAL;
3962 
3963 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
3964 		hist_err("Variable already defined: ", var_name);
3965 		return -EINVAL;
3966 	}
3967 
3968 	flags |= HIST_FIELD_FL_VAR;
3969 	hist_data->n_vars++;
3970 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
3971 		return -EINVAL;
3972 
3973 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
3974 }
3975 
3976 static int create_val_fields(struct hist_trigger_data *hist_data,
3977 			     struct trace_event_file *file)
3978 {
3979 	char *fields_str, *field_str;
3980 	unsigned int i, j = 1;
3981 	int ret;
3982 
3983 	ret = create_hitcount_val(hist_data);
3984 	if (ret)
3985 		goto out;
3986 
3987 	fields_str = hist_data->attrs->vals_str;
3988 	if (!fields_str)
3989 		goto out;
3990 
3991 	strsep(&fields_str, "=");
3992 	if (!fields_str)
3993 		goto out;
3994 
3995 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
3996 		     j < TRACING_MAP_VALS_MAX; i++) {
3997 		field_str = strsep(&fields_str, ",");
3998 		if (!field_str)
3999 			break;
4000 
4001 		if (strcmp(field_str, "hitcount") == 0)
4002 			continue;
4003 
4004 		ret = create_val_field(hist_data, j++, file, field_str);
4005 		if (ret)
4006 			goto out;
4007 	}
4008 
4009 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4010 		ret = -EINVAL;
4011  out:
4012 	return ret;
4013 }
4014 
4015 static int create_key_field(struct hist_trigger_data *hist_data,
4016 			    unsigned int key_idx,
4017 			    unsigned int key_offset,
4018 			    struct trace_event_file *file,
4019 			    char *field_str)
4020 {
4021 	struct hist_field *hist_field = NULL;
4023 	unsigned long flags = 0;
4024 	unsigned int key_size;
4025 	int ret = 0;
4026 
4027 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4028 		return -EINVAL;
4029 
4030 	flags |= HIST_FIELD_FL_KEY;
4031 
4032 	if (strcmp(field_str, "stacktrace") == 0) {
4033 		flags |= HIST_FIELD_FL_STACKTRACE;
4034 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4035 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4036 	} else {
4037 		hist_field = parse_expr(hist_data, file, field_str, flags,
4038 					NULL, 0);
4039 		if (IS_ERR(hist_field)) {
4040 			ret = PTR_ERR(hist_field);
4041 			goto out;
4042 		}
4043 
4044 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF) {
4045 			hist_err("Using variable references as keys not supported: ", field_str);
4046 			destroy_hist_field(hist_field, 0);
4047 			ret = -EINVAL;
4048 			goto out;
4049 		}
4050 
4051 		key_size = hist_field->size;
4052 	}
4053 
4054 	hist_data->fields[key_idx] = hist_field;
4055 
4056 	key_size = ALIGN(key_size, sizeof(u64));
4057 	hist_data->fields[key_idx]->size = key_size;
4058 	hist_data->fields[key_idx]->offset = key_offset;
4059 
4060 	hist_data->key_size += key_size;
4061 
4062 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4063 		ret = -EINVAL;
4064 		goto out;
4065 	}
4066 
4067 	hist_data->n_keys++;
4068 	hist_data->n_fields++;
4069 
4070 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4071 		return -EINVAL;
4072 
4073 	ret = key_size;
4074  out:
4075 	return ret;
4076 }
4077 
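/*
 * Parse the 'keys=' string and create a key field for each
 * comma-separated entry.  Key fields follow the val fields in
 * hist_data->fields[], with key_offset advanced by each key's size.
 */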
4078 static int create_key_fields(struct hist_trigger_data *hist_data,
4079 			     struct trace_event_file *file)
4080 {
4081 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4082 	char *fields_str, *field_str;
4083 	int ret = -EINVAL;
4084 
4085 	fields_str = hist_data->attrs->keys_str;
4086 	if (!fields_str)
4087 		goto out;
4088 
4089 	strsep(&fields_str, "=");
4090 	if (!fields_str)
4091 		goto out;
4092 
4093 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4094 		field_str = strsep(&fields_str, ",");
4095 		if (!field_str)
4096 			break;
4097 		ret = create_key_field(hist_data, i, key_offset,
4098 				       file, field_str);
4099 		if (ret < 0)
4100 			goto out;
4101 		key_offset += ret;
4102 	}
4103 	if (fields_str) {
4104 		ret = -EINVAL;
4105 		goto out;
4106 	}
4107 	ret = 0;
4108  out:
4109 	return ret;
4110 }
4111 
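/*
 * Create a variable field for each variable definition gathered by
 * parse_var_defs().  Variable fields are placed after the val fields
 * in hist_data->fields[].
 */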
4112 static int create_var_fields(struct hist_trigger_data *hist_data,
4113 			     struct trace_event_file *file)
4114 {
4115 	unsigned int i, j = hist_data->n_vals;
4116 	int ret = 0;
4117 
4118 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4119 
4120 	for (i = 0; i < n_vars; i++) {
4121 		char *var_name = hist_data->attrs->var_defs.name[i];
4122 		char *expr = hist_data->attrs->var_defs.expr[i];
4123 
4124 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4125 		if (ret)
4126 			goto out;
4127 	}
4128  out:
4129 	return ret;
4130 }
4131 
4132 static void free_var_defs(struct hist_trigger_data *hist_data)
4133 {
4134 	unsigned int i;
4135 
4136 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4137 		kfree(hist_data->attrs->var_defs.name[i]);
4138 		kfree(hist_data->attrs->var_defs.expr[i]);
4139 	}
4140 
4141 	hist_data->attrs->var_defs.n_vars = 0;
4142 }
4143 
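/*
 * Split each assignment string of the form 'name=expr[,name2=expr2...]'
 * into name/expr pairs and save copies in attrs->var_defs for later
 * use by create_var_fields().
 */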
4144 static int parse_var_defs(struct hist_trigger_data *hist_data)
4145 {
4146 	char *s, *str, *var_name, *field_str;
4147 	unsigned int i, j, n_vars = 0;
4148 	int ret = 0;
4149 
4150 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4151 		str = hist_data->attrs->assignment_str[i];
4152 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4153 			field_str = strsep(&str, ",");
4154 			if (!field_str)
4155 				break;
4156 
4157 			var_name = strsep(&field_str, "=");
4158 			if (!var_name || !field_str) {
4159 				hist_err("Malformed assignment: ", var_name);
4160 				ret = -EINVAL;
4161 				goto free;
4162 			}
4163 
4164 			if (n_vars == TRACING_MAP_VARS_MAX) {
4165 				hist_err("Too many variables defined: ", var_name);
4166 				ret = -EINVAL;
4167 				goto free;
4168 			}
4169 
4170 			s = kstrdup(var_name, GFP_KERNEL);
4171 			if (!s) {
4172 				ret = -ENOMEM;
4173 				goto free;
4174 			}
4175 			hist_data->attrs->var_defs.name[n_vars] = s;
4176 
4177 			s = kstrdup(field_str, GFP_KERNEL);
4178 			if (!s) {
4179 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4180 				ret = -ENOMEM;
4181 				goto free;
4182 			}
4183 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4184 
4185 			hist_data->attrs->var_defs.n_vars = n_vars;
4186 		}
4187 	}
4188 
4189 	return ret;
4190  free:
4191 	free_var_defs(hist_data);
4192 
4193 	return ret;
4194 }
4195 
4196 static int create_hist_fields(struct hist_trigger_data *hist_data,
4197 			      struct trace_event_file *file)
4198 {
4199 	int ret;
4200 
4201 	ret = parse_var_defs(hist_data);
4202 	if (ret)
4203 		goto out;
4204 
4205 	ret = create_val_fields(hist_data, file);
4206 	if (ret)
4207 		goto out;
4208 
4209 	ret = create_var_fields(hist_data, file);
4210 	if (ret)
4211 		goto out;
4212 
4213 	ret = create_key_fields(hist_data, file);
4214 	if (ret)
4215 		goto out;
4216  out:
4217 	free_var_defs(hist_data);
4218 
4219 	return ret;
4220 }
4221 
4222 static int is_descending(const char *str)
4223 {
4224 	if (!str)
4225 		return 0;
4226 
4227 	if (strcmp(str, "descending") == 0)
4228 		return 1;
4229 
4230 	if (strcmp(str, "ascending") == 0)
4231 		return 0;
4232 
4233 	return -EINVAL;
4234 }
4235 
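/*
 * Parse the 'sort=' string into tracing_map sort keys.  Each entry
 * may carry an '.ascending' or '.descending' modifier; hitcount is
 * the default sort key if none is given.  Variable fields are skipped
 * when computing a sort key's field index, since they aren't sortable
 * map values.
 */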
4236 static int create_sort_keys(struct hist_trigger_data *hist_data)
4237 {
4238 	char *fields_str = hist_data->attrs->sort_key_str;
4239 	struct tracing_map_sort_key *sort_key;
4240 	int descending, ret = 0;
4241 	unsigned int i, j, k;
4242 
4243 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4244 
4245 	if (!fields_str)
4246 		goto out;
4247 
4248 	strsep(&fields_str, "=");
4249 	if (!fields_str) {
4250 		ret = -EINVAL;
4251 		goto out;
4252 	}
4253 
4254 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4255 		struct hist_field *hist_field;
4256 		char *field_str, *field_name;
4257 		const char *test_name;
4258 
4259 		sort_key = &hist_data->sort_keys[i];
4260 
4261 		field_str = strsep(&fields_str, ",");
4262 		if (!field_str) {
4263 			if (i == 0)
4264 				ret = -EINVAL;
4265 			break;
4266 		}
4267 
4268 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4269 			ret = -EINVAL;
4270 			break;
4271 		}
4272 
4273 		field_name = strsep(&field_str, ".");
4274 		if (!field_name) {
4275 			ret = -EINVAL;
4276 			break;
4277 		}
4278 
4279 		if (strcmp(field_name, "hitcount") == 0) {
4280 			descending = is_descending(field_str);
4281 			if (descending < 0) {
4282 				ret = descending;
4283 				break;
4284 			}
4285 			sort_key->descending = descending;
4286 			continue;
4287 		}
4288 
4289 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4290 			unsigned int idx;
4291 
4292 			hist_field = hist_data->fields[j];
4293 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4294 				continue;
4295 
4296 			idx = k++;
4297 
4298 			test_name = hist_field_name(hist_field, 0);
4299 
4300 			if (strcmp(field_name, test_name) == 0) {
4301 				sort_key->field_idx = idx;
4302 				descending = is_descending(field_str);
4303 				if (descending < 0) {
4304 					ret = descending;
4305 					goto out;
4306 				}
4307 				sort_key->descending = descending;
4308 				break;
4309 			}
4310 		}
4311 		if (j == hist_data->n_fields) {
4312 			ret = -EINVAL;
4313 			break;
4314 		}
4315 	}
4316 
4317 	hist_data->n_sort_keys = i;
4318  out:
4319 	return ret;
4320 }
4321 
4322 static void destroy_actions(struct hist_trigger_data *hist_data)
4323 {
4324 	unsigned int i;
4325 
4326 	for (i = 0; i < hist_data->n_actions; i++) {
4327 		struct action_data *data = hist_data->actions[i];
4328 
4329 		if (data->fn == action_trace)
4330 			onmatch_destroy(data);
4331 		else if (data->fn == onmax_save)
4332 			onmax_destroy(data);
4333 		else
4334 			kfree(data);
4335 	}
4336 }
4337 
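/*
 * Parse the trigger's action strings: 'onmatch(...)' actions are
 * parsed into synthetic-event generating actions and 'onmax(...)'
 * into max-saving actions.
 */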
4338 static int parse_actions(struct hist_trigger_data *hist_data)
4339 {
4340 	struct trace_array *tr = hist_data->event_file->tr;
4341 	struct action_data *data;
4342 	unsigned int i;
4343 	int ret = 0;
4344 	char *str;
4345 	int len;
4346 
4347 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4348 		str = hist_data->attrs->action_str[i];
4349 
4350 		if ((len = str_has_prefix(str, "onmatch("))) {
4351 			char *action_str = str + len;
4352 
4353 			data = onmatch_parse(tr, action_str);
4354 			if (IS_ERR(data)) {
4355 				ret = PTR_ERR(data);
4356 				break;
4357 			}
4358 			data->fn = action_trace;
4359 		} else if ((len = str_has_prefix(str, "onmax("))) {
4360 			char *action_str = str + len;
4361 
4362 			data = onmax_parse(action_str);
4363 			if (IS_ERR(data)) {
4364 				ret = PTR_ERR(data);
4365 				break;
4366 			}
4367 			data->fn = onmax_save;
4368 		} else {
4369 			ret = -EINVAL;
4370 			break;
4371 		}
4372 
4373 		hist_data->actions[hist_data->n_actions++] = data;
4374 	}
4375 
4376 	return ret;
4377 }
4378 
4379 static int create_actions(struct hist_trigger_data *hist_data,
4380 			  struct trace_event_file *file)
4381 {
4382 	struct action_data *data;
4383 	unsigned int i;
4384 	int ret = 0;
4385 
4386 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4387 		data = hist_data->actions[i];
4388 
4389 		if (data->fn == action_trace) {
4390 			ret = onmatch_create(hist_data, file, data);
4391 			if (ret)
4392 				return ret;
4393 		} else if (data->fn == onmax_save) {
4394 			ret = onmax_create(hist_data, data);
4395 			if (ret)
4396 				return ret;
4397 		}
4398 	}
4399 
4400 	return ret;
4401 }
4402 
4403 static void print_actions(struct seq_file *m,
4404 			  struct hist_trigger_data *hist_data,
4405 			  struct tracing_map_elt *elt)
4406 {
4407 	unsigned int i;
4408 
4409 	for (i = 0; i < hist_data->n_actions; i++) {
4410 		struct action_data *data = hist_data->actions[i];
4411 
4412 		if (data->fn == onmax_save)
4413 			onmax_print(m, hist_data, elt, data);
4414 	}
4415 }
4416 
4417 static void print_onmax_spec(struct seq_file *m,
4418 			     struct hist_trigger_data *hist_data,
4419 			     struct action_data *data)
4420 {
4421 	unsigned int i;
4422 
4423 	seq_puts(m, ":onmax(");
4424 	seq_printf(m, "%s", data->onmax.var_str);
4425 	seq_printf(m, ").%s(", data->onmax.fn_name);
4426 
4427 	for (i = 0; i < hist_data->n_max_vars; i++) {
4428 		seq_printf(m, "%s", hist_data->max_vars[i]->var->var.name);
4429 		if (i < hist_data->n_max_vars - 1)
4430 			seq_puts(m, ",");
4431 	}
4432 	seq_puts(m, ")");
4433 }
4434 
4435 static void print_onmatch_spec(struct seq_file *m,
4436 			       struct hist_trigger_data *hist_data,
4437 			       struct action_data *data)
4438 {
4439 	unsigned int i;
4440 
4441 	seq_printf(m, ":onmatch(%s.%s).", data->onmatch.match_event_system,
4442 		   data->onmatch.match_event);
4443 
4444 	seq_printf(m, "%s(", data->onmatch.synth_event->name);
4445 
4446 	for (i = 0; i < data->n_params; i++) {
4447 		if (i)
4448 			seq_puts(m, ",");
4449 		seq_printf(m, "%s", data->params[i]);
4450 	}
4451 
4452 	seq_puts(m, ")");
4453 }
4454 
4455 static bool actions_match(struct hist_trigger_data *hist_data,
4456 			  struct hist_trigger_data *hist_data_test)
4457 {
4458 	unsigned int i, j;
4459 
4460 	if (hist_data->n_actions != hist_data_test->n_actions)
4461 		return false;
4462 
4463 	for (i = 0; i < hist_data->n_actions; i++) {
4464 		struct action_data *data = hist_data->actions[i];
4465 		struct action_data *data_test = hist_data_test->actions[i];
4466 
4467 		if (data->fn != data_test->fn)
4468 			return false;
4469 
4470 		if (data->n_params != data_test->n_params)
4471 			return false;
4472 
4473 		for (j = 0; j < data->n_params; j++) {
4474 			if (strcmp(data->params[j], data_test->params[j]) != 0)
4475 				return false;
4476 		}
4477 
4478 		if (data->fn == action_trace) {
4479 			if (strcmp(data->onmatch.synth_event_name,
4480 				   data_test->onmatch.synth_event_name) != 0)
4481 				return false;
4482 			if (strcmp(data->onmatch.match_event_system,
4483 				   data_test->onmatch.match_event_system) != 0)
4484 				return false;
4485 			if (strcmp(data->onmatch.match_event,
4486 				   data_test->onmatch.match_event) != 0)
4487 				return false;
4488 		} else if (data->fn == onmax_save) {
4489 			if (strcmp(data->onmax.var_str,
4490 				   data_test->onmax.var_str) != 0)
4491 				return false;
4492 			if (strcmp(data->onmax.fn_name,
4493 				   data_test->onmax.fn_name) != 0)
4494 				return false;
4495 		}
4496 	}
4497 
4498 	return true;
4499 }
4500 
4502 static void print_actions_spec(struct seq_file *m,
4503 			       struct hist_trigger_data *hist_data)
4504 {
4505 	unsigned int i;
4506 
4507 	for (i = 0; i < hist_data->n_actions; i++) {
4508 		struct action_data *data = hist_data->actions[i];
4509 
4510 		if (data->fn == action_trace)
4511 			print_onmatch_spec(m, hist_data, data);
4512 		else if (data->fn == onmax_save)
4513 			print_onmax_spec(m, hist_data, data);
4514 	}
4515 }
4516 
4517 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
4518 {
4519 	unsigned int i;
4520 
4521 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
4522 		kfree(hist_data->field_var_hists[i]->cmd);
4523 		kfree(hist_data->field_var_hists[i]);
4524 	}
4525 }
4526 
4527 static void destroy_hist_data(struct hist_trigger_data *hist_data)
4528 {
4529 	if (!hist_data)
4530 		return;
4531 
4532 	destroy_hist_trigger_attrs(hist_data->attrs);
4533 	destroy_hist_fields(hist_data);
4534 	tracing_map_destroy(hist_data->map);
4535 
4536 	destroy_actions(hist_data);
4537 	destroy_field_vars(hist_data);
4538 	destroy_field_var_hists(hist_data);
4539 
4540 	kfree(hist_data);
4541 }
4542 
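/*
 * Add a tracing_map field for each hist field: key fields get a
 * compare function based on the field type, non-variable vals become
 * sum fields, and variables are assigned a tracing_map var whose
 * index is saved in the hist field's var.idx.
 */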
4543 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
4544 {
4545 	struct tracing_map *map = hist_data->map;
4546 	struct ftrace_event_field *field;
4547 	struct hist_field *hist_field;
4548 	int i, idx = 0;
4549 
4550 	for_each_hist_field(i, hist_data) {
4551 		hist_field = hist_data->fields[i];
4552 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
4553 			tracing_map_cmp_fn_t cmp_fn;
4554 
4555 			field = hist_field->field;
4556 
4557 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
4558 				cmp_fn = tracing_map_cmp_none;
4559 			else if (!field)
4560 				cmp_fn = tracing_map_cmp_num(hist_field->size,
4561 							     hist_field->is_signed);
4562 			else if (is_string_field(field))
4563 				cmp_fn = tracing_map_cmp_string;
4564 			else
4565 				cmp_fn = tracing_map_cmp_num(field->size,
4566 							     field->is_signed);
4567 			idx = tracing_map_add_key_field(map,
4568 							hist_field->offset,
4569 							cmp_fn);
4570 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
4571 			idx = tracing_map_add_sum_field(map);
4572 
4573 		if (idx < 0)
4574 			return idx;
4575 
4576 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
4577 			idx = tracing_map_add_var(map);
4578 			if (idx < 0)
4579 				return idx;
4580 			hist_field->var.idx = idx;
4581 			hist_field->var.hist_data = hist_data;
4582 		}
4583 	}
4584 
4585 	return 0;
4586 }
4587 
4588 static struct hist_trigger_data *
4589 create_hist_data(unsigned int map_bits,
4590 		 struct hist_trigger_attrs *attrs,
4591 		 struct trace_event_file *file,
4592 		 bool remove)
4593 {
4594 	const struct tracing_map_ops *map_ops = NULL;
4595 	struct hist_trigger_data *hist_data;
4596 	int ret = 0;
4597 
4598 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
4599 	if (!hist_data)
4600 		return ERR_PTR(-ENOMEM);
4601 
4602 	hist_data->attrs = attrs;
4603 	hist_data->remove = remove;
4604 	hist_data->event_file = file;
4605 
4606 	ret = parse_actions(hist_data);
4607 	if (ret)
4608 		goto free;
4609 
4610 	ret = create_hist_fields(hist_data, file);
4611 	if (ret)
4612 		goto free;
4613 
4614 	ret = create_sort_keys(hist_data);
4615 	if (ret)
4616 		goto free;
4617 
4618 	map_ops = &hist_trigger_elt_data_ops;
4619 
4620 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
4621 					    map_ops, hist_data);
4622 	if (IS_ERR(hist_data->map)) {
4623 		ret = PTR_ERR(hist_data->map);
4624 		hist_data->map = NULL;
4625 		goto free;
4626 	}
4627 
4628 	ret = create_tracing_map_fields(hist_data);
4629 	if (ret)
4630 		goto free;
4631  out:
4632 	return hist_data;
4633  free:
4634 	hist_data->attrs = NULL;
4635 
4636 	destroy_hist_data(hist_data);
4637 
4638 	hist_data = ERR_PTR(ret);
4639 
4640 	goto out;
4641 }
4642 
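/*
 * Update a tracing_map element for the current event: evaluate each
 * val field and either set the associated variable or update the
 * running sum, set any variables attached to key fields, then update
 * the trigger's field variables.
 */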
4643 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
4644 				    struct tracing_map_elt *elt, void *rec,
4645 				    struct ring_buffer_event *rbe,
4646 				    u64 *var_ref_vals)
4647 {
4648 	struct hist_elt_data *elt_data;
4649 	struct hist_field *hist_field;
4650 	unsigned int i, var_idx;
4651 	u64 hist_val;
4652 
4653 	elt_data = elt->private_data;
4654 	elt_data->var_ref_vals = var_ref_vals;
4655 
4656 	for_each_hist_val_field(i, hist_data) {
4657 		hist_field = hist_data->fields[i];
4658 		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4659 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
4660 			var_idx = hist_field->var.idx;
4661 			tracing_map_set_var(elt, var_idx, hist_val);
4662 			continue;
4663 		}
4664 		tracing_map_update_sum(elt, i, hist_val);
4665 	}
4666 
4667 	for_each_hist_key_field(i, hist_data) {
4668 		hist_field = hist_data->fields[i];
4669 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
4670 			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4671 			var_idx = hist_field->var.idx;
4672 			tracing_map_set_var(elt, var_idx, hist_val);
4673 		}
4674 	}
4675 
4676 	update_field_vars(hist_data, elt, rbe, rec);
4677 }
4678 
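/*
 * Copy a single key's bytes into the compound key at the key field's
 * offset.  For string keys the copy length depends on the string type
 * and is clamped so the result stays NUL-terminated.
 */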
4679 static inline void add_to_key(char *compound_key, void *key,
4680 			      struct hist_field *key_field, void *rec)
4681 {
4682 	size_t size = key_field->size;
4683 
4684 	if (key_field->flags & HIST_FIELD_FL_STRING) {
4685 		struct ftrace_event_field *field;
4686 
4687 		field = key_field->field;
4688 		if (field->filter_type == FILTER_DYN_STRING)
4689 			size = *(u32 *)(rec + field->offset) >> 16;
4690 		else if (field->filter_type == FILTER_PTR_STRING)
4691 			size = strlen(key);
4692 		else if (field->filter_type == FILTER_STATIC_STRING)
4693 			size = field->size;
4694 
4695 		/* ensure NULL-termination */
4696 		if (size > key_field->size - 1)
4697 			size = key_field->size - 1;
4698 	}
4699 
4700 	memcpy(compound_key + key_field->offset, key, size);
4701 }
4702 
4703 static void
4704 hist_trigger_actions(struct hist_trigger_data *hist_data,
4705 		     struct tracing_map_elt *elt, void *rec,
4706 		     struct ring_buffer_event *rbe, u64 *var_ref_vals)
4707 {
4708 	struct action_data *data;
4709 	unsigned int i;
4710 
4711 	for (i = 0; i < hist_data->n_actions; i++) {
4712 		data = hist_data->actions[i];
4713 		data->fn(hist_data, elt, rec, rbe, data, var_ref_vals);
4714 	}
4715 }
4716 
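/*
 * Per-event trigger function: build the (possibly compound) key from
 * the key fields, resolve any variable references, insert or look up
 * the tracing_map element and update it, then run the trigger's
 * actions if all variable references resolved.
 */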
4717 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
4718 			       struct ring_buffer_event *rbe)
4719 {
4720 	struct hist_trigger_data *hist_data = data->private_data;
4721 	bool use_compound_key = (hist_data->n_keys > 1);
4722 	unsigned long entries[HIST_STACKTRACE_DEPTH];
4723 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
4724 	char compound_key[HIST_KEY_SIZE_MAX];
4725 	struct tracing_map_elt *elt = NULL;
4726 	struct stack_trace stacktrace;
4727 	struct hist_field *key_field;
4728 	u64 field_contents;
4729 	void *key = NULL;
4730 	unsigned int i;
4731 
4732 	memset(compound_key, 0, hist_data->key_size);
4733 
4734 	for_each_hist_key_field(i, hist_data) {
4735 		key_field = hist_data->fields[i];
4736 
4737 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4738 			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
4739 			stacktrace.entries = entries;
4740 			stacktrace.nr_entries = 0;
4741 			stacktrace.skip = HIST_STACKTRACE_SKIP;
4742 
4743 			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
4744 			save_stack_trace(&stacktrace);
4745 
4746 			key = entries;
4747 		} else {
4748 			field_contents = key_field->fn(key_field, elt, rbe, rec);
4749 			if (key_field->flags & HIST_FIELD_FL_STRING) {
4750 				key = (void *)(unsigned long)field_contents;
4751 				use_compound_key = true;
4752 			} else
4753 				key = (void *)&field_contents;
4754 		}
4755 
4756 		if (use_compound_key)
4757 			add_to_key(compound_key, key, key_field, rec);
4758 	}
4759 
4760 	if (use_compound_key)
4761 		key = compound_key;
4762 
4763 	if (hist_data->n_var_refs &&
4764 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
4765 		return;
4766 
4767 	elt = tracing_map_insert(hist_data->map, key);
4768 	if (!elt)
4769 		return;
4770 
4771 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
4772 
4773 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
4774 		hist_trigger_actions(hist_data, elt, rec, rbe, var_ref_vals);
4775 }
4776 
4777 static void hist_trigger_stacktrace_print(struct seq_file *m,
4778 					  unsigned long *stacktrace_entries,
4779 					  unsigned int max_entries)
4780 {
4781 	char str[KSYM_SYMBOL_LEN];
4782 	unsigned int spaces = 8;
4783 	unsigned int i;
4784 
4785 	for (i = 0; i < max_entries; i++) {
4786 		if (stacktrace_entries[i] == ULONG_MAX)
4787 			return;
4788 
4789 		seq_printf(m, "%*c", 1 + spaces, ' ');
4790 		sprint_symbol(str, stacktrace_entries[i]);
4791 		seq_printf(m, "%s\n", str);
4792 	}
4793 }
4794 
4795 static void
4796 hist_trigger_entry_print(struct seq_file *m,
4797 			 struct hist_trigger_data *hist_data, void *key,
4798 			 struct tracing_map_elt *elt)
4799 {
4800 	struct hist_field *key_field;
4801 	char str[KSYM_SYMBOL_LEN];
4802 	bool multiline = false;
4803 	const char *field_name;
4804 	unsigned int i;
4805 	u64 uval;
4806 
4807 	seq_puts(m, "{ ");
4808 
4809 	for_each_hist_key_field(i, hist_data) {
4810 		key_field = hist_data->fields[i];
4811 
4812 		if (i > hist_data->n_vals)
4813 			seq_puts(m, ", ");
4814 
4815 		field_name = hist_field_name(key_field, 0);
4816 
4817 		if (key_field->flags & HIST_FIELD_FL_HEX) {
4818 			uval = *(u64 *)(key + key_field->offset);
4819 			seq_printf(m, "%s: %llx", field_name, uval);
4820 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
4821 			uval = *(u64 *)(key + key_field->offset);
4822 			sprint_symbol_no_offset(str, uval);
4823 			seq_printf(m, "%s: [%llx] %-45s", field_name,
4824 				   uval, str);
4825 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
4826 			uval = *(u64 *)(key + key_field->offset);
4827 			sprint_symbol(str, uval);
4828 			seq_printf(m, "%s: [%llx] %-55s", field_name,
4829 				   uval, str);
4830 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
4831 			struct hist_elt_data *elt_data = elt->private_data;
4832 			char *comm;
4833 
4834 			if (WARN_ON_ONCE(!elt_data))
4835 				return;
4836 
4837 			comm = elt_data->comm;
4838 
4839 			uval = *(u64 *)(key + key_field->offset);
4840 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
4841 				   comm, uval);
4842 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
4843 			const char *syscall_name;
4844 
4845 			uval = *(u64 *)(key + key_field->offset);
4846 			syscall_name = get_syscall_name(uval);
4847 			if (!syscall_name)
4848 				syscall_name = "unknown_syscall";
4849 
4850 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
4851 				   syscall_name, uval);
4852 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4853 			seq_puts(m, "stacktrace:\n");
4854 			hist_trigger_stacktrace_print(m,
4855 						      key + key_field->offset,
4856 						      HIST_STACKTRACE_DEPTH);
4857 			multiline = true;
4858 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
4859 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
4860 				   *(u64 *)(key + key_field->offset));
4861 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
4862 			seq_printf(m, "%s: %-50s", field_name,
4863 				   (char *)(key + key_field->offset));
4864 		} else {
4865 			uval = *(u64 *)(key + key_field->offset);
4866 			seq_printf(m, "%s: %10llu", field_name, uval);
4867 		}
4868 	}
4869 
4870 	if (!multiline)
4871 		seq_puts(m, " ");
4872 
4873 	seq_puts(m, "}");
4874 
4875 	seq_printf(m, " hitcount: %10llu",
4876 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
4877 
4878 	for (i = 1; i < hist_data->n_vals; i++) {
4879 		field_name = hist_field_name(hist_data->fields[i], 0);
4880 
4881 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
4882 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
4883 			continue;
4884 
4885 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
4886 			seq_printf(m, "  %s: %10llx", field_name,
4887 				   tracing_map_read_sum(elt, i));
4888 		} else {
4889 			seq_printf(m, "  %s: %10llu", field_name,
4890 				   tracing_map_read_sum(elt, i));
4891 		}
4892 	}
4893 
4894 	print_actions(m, hist_data, elt);
4895 
4896 	seq_puts(m, "\n");
4897 }
4898 
4899 static int print_entries(struct seq_file *m,
4900 			 struct hist_trigger_data *hist_data)
4901 {
4902 	struct tracing_map_sort_entry **sort_entries = NULL;
4903 	struct tracing_map *map = hist_data->map;
4904 	int i, n_entries;
4905 
4906 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
4907 					     hist_data->n_sort_keys,
4908 					     &sort_entries);
4909 	if (n_entries < 0)
4910 		return n_entries;
4911 
4912 	for (i = 0; i < n_entries; i++)
4913 		hist_trigger_entry_print(m, hist_data,
4914 					 sort_entries[i]->key,
4915 					 sort_entries[i]->elt);
4916 
4917 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
4918 
4919 	return n_entries;
4920 }
4921 
4922 static void hist_trigger_show(struct seq_file *m,
4923 			      struct event_trigger_data *data, int n)
4924 {
4925 	struct hist_trigger_data *hist_data;
4926 	int n_entries;
4927 
4928 	if (n > 0)
4929 		seq_puts(m, "\n\n");
4930 
4931 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
4932 	data->ops->print(m, data->ops, data);
4933 	seq_puts(m, "#\n\n");
4934 
4935 	hist_data = data->private_data;
4936 	n_entries = print_entries(m, hist_data);
4937 	if (n_entries < 0)
4938 		n_entries = 0;
4939 
4940 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
4941 		   (u64)atomic64_read(&hist_data->map->hits),
4942 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
4943 }
4944 
4945 static int hist_show(struct seq_file *m, void *v)
4946 {
4947 	struct event_trigger_data *data;
4948 	struct trace_event_file *event_file;
4949 	int n = 0, ret = 0;
4950 
4951 	mutex_lock(&event_mutex);
4952 
4953 	event_file = event_file_data(m->private);
4954 	if (unlikely(!event_file)) {
4955 		ret = -ENODEV;
4956 		goto out_unlock;
4957 	}
4958 
4959 	list_for_each_entry_rcu(data, &event_file->triggers, list) {
4960 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
4961 			hist_trigger_show(m, data, n++);
4962 	}
4963 
4964 	if (have_hist_err()) {
4965 		seq_printf(m, "\nERROR: %s\n", hist_err_str);
4966 		seq_printf(m, "  Last command: %s\n", last_hist_cmd);
4967 	}
4968 
4969  out_unlock:
4970 	mutex_unlock(&event_mutex);
4971 
4972 	return ret;
4973 }
4974 
4975 static int event_hist_open(struct inode *inode, struct file *file)
4976 {
4977 	return single_open(file, hist_show, file);
4978 }
4979 
4980 const struct file_operations event_hist_fops = {
4981 	.open = event_hist_open,
4982 	.read = seq_read,
4983 	.llseek = seq_lseek,
4984 	.release = single_release,
4985 };
4986 
4987 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
4988 {
4989 	const char *field_name = hist_field_name(hist_field, 0);
4990 
4991 	if (hist_field->var.name)
4992 		seq_printf(m, "%s=", hist_field->var.name);
4993 
4994 	if (hist_field->flags & HIST_FIELD_FL_CPU)
4995 		seq_puts(m, "cpu");
4996 	else if (field_name) {
4997 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
4998 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
4999 			seq_putc(m, '$');
5000 		seq_printf(m, "%s", field_name);
5001 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5002 		seq_puts(m, "common_timestamp");
5003 
5004 	if (hist_field->flags) {
5005 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5006 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5007 			const char *flags = get_hist_field_flags(hist_field);
5008 
5009 			if (flags)
5010 				seq_printf(m, ".%s", flags);
5011 		}
5012 	}
5013 }
5014 
5015 static int event_hist_trigger_print(struct seq_file *m,
5016 				    struct event_trigger_ops *ops,
5017 				    struct event_trigger_data *data)
5018 {
5019 	struct hist_trigger_data *hist_data = data->private_data;
5020 	struct hist_field *field;
5021 	bool have_var = false;
5022 	unsigned int i;
5023 
5024 	seq_puts(m, "hist:");
5025 
5026 	if (data->name)
5027 		seq_printf(m, "%s:", data->name);
5028 
5029 	seq_puts(m, "keys=");
5030 
5031 	for_each_hist_key_field(i, hist_data) {
5032 		field = hist_data->fields[i];
5033 
5034 		if (i > hist_data->n_vals)
5035 			seq_puts(m, ",");
5036 
5037 		if (field->flags & HIST_FIELD_FL_STACKTRACE)
5038 			seq_puts(m, "stacktrace");
5039 		else
5040 			hist_field_print(m, field);
5041 	}
5042 
5043 	seq_puts(m, ":vals=");
5044 
5045 	for_each_hist_val_field(i, hist_data) {
5046 		field = hist_data->fields[i];
5047 		if (field->flags & HIST_FIELD_FL_VAR) {
5048 			have_var = true;
5049 			continue;
5050 		}
5051 
5052 		if (i == HITCOUNT_IDX)
5053 			seq_puts(m, "hitcount");
5054 		else {
5055 			seq_puts(m, ",");
5056 			hist_field_print(m, field);
5057 		}
5058 	}
5059 
5060 	if (have_var) {
5061 		unsigned int n = 0;
5062 
5063 		seq_puts(m, ":");
5064 
5065 		for_each_hist_val_field(i, hist_data) {
5066 			field = hist_data->fields[i];
5067 
5068 			if (field->flags & HIST_FIELD_FL_VAR) {
5069 				if (n++)
5070 					seq_puts(m, ",");
5071 				hist_field_print(m, field);
5072 			}
5073 		}
5074 	}
5075 
5076 	seq_puts(m, ":sort=");
5077 
5078 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5079 		struct tracing_map_sort_key *sort_key;
5080 		unsigned int idx, first_key_idx;
5081 
5082 		/* skip VAR vals */
5083 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
5084 
5085 		sort_key = &hist_data->sort_keys[i];
5086 		idx = sort_key->field_idx;
5087 
5088 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
5089 			return -EINVAL;
5090 
5091 		if (i > 0)
5092 			seq_puts(m, ",");
5093 
5094 		if (idx == HITCOUNT_IDX)
5095 			seq_puts(m, "hitcount");
5096 		else {
5097 			if (idx >= first_key_idx)
5098 				idx += hist_data->n_vars;
5099 			hist_field_print(m, hist_data->fields[idx]);
5100 		}
5101 
5102 		if (sort_key->descending)
5103 			seq_puts(m, ".descending");
5104 	}
5105 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5106 	if (hist_data->enable_timestamps)
5107 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5108 
5109 	print_actions_spec(m, hist_data);
5110 
5111 	if (data->filter_str)
5112 		seq_printf(m, " if %s", data->filter_str);
5113 
5114 	if (data->paused)
5115 		seq_puts(m, " [paused]");
5116 	else
5117 		seq_puts(m, " [active]");
5118 
5119 	seq_putc(m, '\n');
5120 
5121 	return 0;
5122 }
5123 
5124 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5125 				   struct event_trigger_data *data)
5126 {
5127 	struct hist_trigger_data *hist_data = data->private_data;
5128 
5129 	if (!data->ref && hist_data->attrs->name)
5130 		save_named_trigger(hist_data->attrs->name, data);
5131 
5132 	data->ref++;
5133 
5134 	return 0;
5135 }
5136 
5137 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5138 {
5139 	struct trace_event_file *file;
5140 	unsigned int i;
5141 	char *cmd;
5142 	int ret;
5143 
5144 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5145 		file = hist_data->field_var_hists[i]->hist_data->event_file;
5146 		cmd = hist_data->field_var_hists[i]->cmd;
5147 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5148 					      "!hist", "hist", cmd);
5149 	}
5150 }
5151 
5152 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5153 				    struct event_trigger_data *data)
5154 {
5155 	struct hist_trigger_data *hist_data = data->private_data;
5156 
5157 	if (WARN_ON_ONCE(data->ref <= 0))
5158 		return;
5159 
5160 	data->ref--;
5161 	if (!data->ref) {
5162 		if (data->name)
5163 			del_named_trigger(data);
5164 
5165 		trigger_data_free(data);
5166 
5167 		remove_hist_vars(hist_data);
5168 
5169 		unregister_field_var_hists(hist_data);
5170 
5171 		destroy_hist_data(hist_data);
5172 	}
5173 }
5174 
5175 static struct event_trigger_ops event_hist_trigger_ops = {
5176 	.func			= event_hist_trigger,
5177 	.print			= event_hist_trigger_print,
5178 	.init			= event_hist_trigger_init,
5179 	.free			= event_hist_trigger_free,
5180 };
5181 
5182 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5183 					 struct event_trigger_data *data)
5184 {
5185 	data->ref++;
5186 
5187 	save_named_trigger(data->named_data->name, data);
5188 
5189 	event_hist_trigger_init(ops, data->named_data);
5190 
5191 	return 0;
5192 }
5193 
5194 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5195 					  struct event_trigger_data *data)
5196 {
5197 	if (WARN_ON_ONCE(data->ref <= 0))
5198 		return;
5199 
5200 	event_hist_trigger_free(ops, data->named_data);
5201 
5202 	data->ref--;
5203 	if (!data->ref) {
5204 		del_named_trigger(data);
5205 		trigger_data_free(data);
5206 	}
5207 }
5208 
5209 static struct event_trigger_ops event_hist_trigger_named_ops = {
5210 	.func			= event_hist_trigger,
5211 	.print			= event_hist_trigger_print,
5212 	.init			= event_hist_trigger_named_init,
5213 	.free			= event_hist_trigger_named_free,
5214 };
5215 
5216 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5217 							    char *param)
5218 {
5219 	return &event_hist_trigger_ops;
5220 }
5221 
5222 static void hist_clear(struct event_trigger_data *data)
5223 {
5224 	struct hist_trigger_data *hist_data = data->private_data;
5225 
5226 	if (data->name)
5227 		pause_named_trigger(data);
5228 
5229 	tracepoint_synchronize_unregister();
5230 
5231 	tracing_map_clear(hist_data->map);
5232 
5233 	if (data->name)
5234 		unpause_named_trigger(data);
5235 }
5236 
5237 static bool compatible_field(struct ftrace_event_field *field,
5238 			     struct ftrace_event_field *test_field)
5239 {
5240 	if (field == test_field)
5241 		return true;
5242 	if (field == NULL || test_field == NULL)
5243 		return false;
5244 	if (strcmp(field->name, test_field->name) != 0)
5245 		return false;
5246 	if (strcmp(field->type, test_field->type) != 0)
5247 		return false;
5248 	if (field->size != test_field->size)
5249 		return false;
5250 	if (field->is_signed != test_field->is_signed)
5251 		return false;
5252 
5253 	return true;
5254 }
5255 
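/*
 * Return true if two hist triggers are functionally identical: same
 * fields, sort keys, actions and (unless ignore_filter) filter.
 */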
5256 static bool hist_trigger_match(struct event_trigger_data *data,
5257 			       struct event_trigger_data *data_test,
5258 			       struct event_trigger_data *named_data,
5259 			       bool ignore_filter)
5260 {
5261 	struct tracing_map_sort_key *sort_key, *sort_key_test;
5262 	struct hist_trigger_data *hist_data, *hist_data_test;
5263 	struct hist_field *key_field, *key_field_test;
5264 	unsigned int i;
5265 
5266 	if (named_data && (named_data != data_test) &&
5267 	    (named_data != data_test->named_data))
5268 		return false;
5269 
5270 	if (!named_data && is_named_trigger(data_test))
5271 		return false;
5272 
5273 	hist_data = data->private_data;
5274 	hist_data_test = data_test->private_data;
5275 
5276 	if (hist_data->n_vals != hist_data_test->n_vals ||
5277 	    hist_data->n_fields != hist_data_test->n_fields ||
5278 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5279 		return false;
5280 
5281 	if (!ignore_filter) {
5282 		if ((data->filter_str && !data_test->filter_str) ||
5283 		   (!data->filter_str && data_test->filter_str))
5284 			return false;
5285 	}
5286 
5287 	for_each_hist_field(i, hist_data) {
5288 		key_field = hist_data->fields[i];
5289 		key_field_test = hist_data_test->fields[i];
5290 
5291 		if (key_field->flags != key_field_test->flags)
5292 			return false;
5293 		if (!compatible_field(key_field->field, key_field_test->field))
5294 			return false;
5295 		if (key_field->offset != key_field_test->offset)
5296 			return false;
5297 		if (key_field->size != key_field_test->size)
5298 			return false;
5299 		if (key_field->is_signed != key_field_test->is_signed)
5300 			return false;
5301 		if (!!key_field->var.name != !!key_field_test->var.name)
5302 			return false;
5303 		if (key_field->var.name &&
5304 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
5305 			return false;
5306 	}
5307 
5308 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5309 		sort_key = &hist_data->sort_keys[i];
5310 		sort_key_test = &hist_data_test->sort_keys[i];
5311 
5312 		if (sort_key->field_idx != sort_key_test->field_idx ||
5313 		    sort_key->descending != sort_key_test->descending)
5314 			return false;
5315 	}
5316 
5317 	if (!ignore_filter && data->filter_str &&
5318 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
5319 		return false;
5320 
5321 	if (!actions_match(hist_data, hist_data_test))
5322 		return false;
5323 
5324 	return true;
5325 }
5326 
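/*
 * Register a hist trigger on the given event file.  A named trigger
 * must match any existing trigger of the same name.  If an equivalent
 * trigger already exists it is paused, continued or cleared as
 * requested; otherwise registration fails with -EEXIST.  Returns the
 * number of triggers registered, zero if an existing trigger was
 * modified instead, or a negative error.
 */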
5327 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5328 				 struct event_trigger_data *data,
5329 				 struct trace_event_file *file)
5330 {
5331 	struct hist_trigger_data *hist_data = data->private_data;
5332 	struct event_trigger_data *test, *named_data = NULL;
5333 	int ret = 0;
5334 
5335 	if (hist_data->attrs->name) {
5336 		named_data = find_named_trigger(hist_data->attrs->name);
5337 		if (named_data) {
5338 			if (!hist_trigger_match(data, named_data, named_data,
5339 						true)) {
5340 				hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data->attrs->name);
5341 				ret = -EINVAL;
5342 				goto out;
5343 			}
5344 		}
5345 	}
5346 
5347 	if (hist_data->attrs->name && !named_data)
5348 		goto new;
5349 
5350 	list_for_each_entry_rcu(test, &file->triggers, list) {
5351 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5352 			if (!hist_trigger_match(data, test, named_data, false))
5353 				continue;
5354 			if (hist_data->attrs->pause)
5355 				test->paused = true;
5356 			else if (hist_data->attrs->cont)
5357 				test->paused = false;
5358 			else if (hist_data->attrs->clear)
5359 				hist_clear(test);
5360 			else {
5361 				hist_err("Hist trigger already exists", NULL);
5362 				ret = -EEXIST;
5363 			}
5364 			goto out;
5365 		}
5366 	}
5367  new:
5368 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
5369 		hist_err("Can't clear or continue a nonexistent hist trigger", NULL);
5370 		ret = -ENOENT;
5371 		goto out;
5372 	}
5373 
5374 	if (hist_data->attrs->pause)
5375 		data->paused = true;
5376 
5377 	if (named_data) {
5378 		data->private_data = named_data->private_data;
5379 		set_named_trigger_data(data, named_data);
5380 		data->ops = &event_hist_trigger_named_ops;
5381 	}
5382 
5383 	if (data->ops->init) {
5384 		ret = data->ops->init(data->ops, data);
5385 		if (ret < 0)
5386 			goto out;
5387 	}
5388 
5389 	if (hist_data->enable_timestamps) {
5390 		char *clock = hist_data->attrs->clock;
5391 
5392 		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5393 		if (ret) {
5394 			hist_err("Couldn't set trace_clock: ", clock);
5395 			goto out;
5396 		}
5397 
5398 		tracing_set_time_stamp_abs(file->tr, true);
5399 	}
5400 
5401 	if (named_data)
5402 		destroy_hist_data(hist_data);
5403 
5404 	ret++;
5405  out:
5406 	return ret;
5407 }
5408 
5409 static int hist_trigger_enable(struct event_trigger_data *data,
5410 			       struct trace_event_file *file)
5411 {
5412 	int ret = 0;
5413 
5414 	list_add_tail_rcu(&data->list, &file->triggers);
5415 
5416 	update_cond_flag(file);
5417 
5418 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
5419 		list_del_rcu(&data->list);
5420 		update_cond_flag(file);
5421 		ret--;
5422 	}
5423 
5424 	return ret;
5425 }
5426 
5427 static bool have_hist_trigger_match(struct event_trigger_data *data,
5428 				    struct trace_event_file *file)
5429 {
5430 	struct hist_trigger_data *hist_data = data->private_data;
5431 	struct event_trigger_data *test, *named_data = NULL;
5432 	bool match = false;
5433 
5434 	if (hist_data->attrs->name)
5435 		named_data = find_named_trigger(hist_data->attrs->name);
5436 
5437 	list_for_each_entry_rcu(test, &file->triggers, list) {
5438 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5439 			if (hist_trigger_match(data, test, named_data, false)) {
5440 				match = true;
5441 				break;
5442 			}
5443 		}
5444 	}
5445 
5446 	return match;
5447 }
5448 
5449 static bool hist_trigger_check_refs(struct event_trigger_data *data,
5450 				    struct trace_event_file *file)
5451 {
5452 	struct hist_trigger_data *hist_data = data->private_data;
5453 	struct event_trigger_data *test, *named_data = NULL;
5454 
5455 	if (hist_data->attrs->name)
5456 		named_data = find_named_trigger(hist_data->attrs->name);
5457 
5458 	list_for_each_entry_rcu(test, &file->triggers, list) {
5459 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5460 			if (!hist_trigger_match(data, test, named_data, false))
5461 				continue;
5462 			hist_data = test->private_data;
5463 			if (check_var_refs(hist_data))
5464 				return true;
5465 			break;
5466 		}
5467 	}
5468 
5469 	return false;
5470 }
5471 
5472 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
5473 				    struct event_trigger_data *data,
5474 				    struct trace_event_file *file)
5475 {
5476 	struct hist_trigger_data *hist_data = data->private_data;
5477 	struct event_trigger_data *test, *named_data = NULL;
5478 	bool unregistered = false;
5479 
5480 	if (hist_data->attrs->name)
5481 		named_data = find_named_trigger(hist_data->attrs->name);
5482 
5483 	list_for_each_entry_rcu(test, &file->triggers, list) {
5484 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5485 			if (!hist_trigger_match(data, test, named_data, false))
5486 				continue;
5487 			unregistered = true;
5488 			list_del_rcu(&test->list);
5489 			trace_event_trigger_enable_disable(file, 0);
5490 			update_cond_flag(file);
5491 			break;
5492 		}
5493 	}
5494 
5495 	if (unregistered && test->ops->free)
5496 		test->ops->free(test->ops, test);
5497 
5498 	if (hist_data->enable_timestamps) {
5499 		if (!hist_data->remove || unregistered)
5500 			tracing_set_time_stamp_abs(file->tr, false);
5501 	}
5502 }
5503 
5504 static bool hist_file_check_refs(struct trace_event_file *file)
5505 {
5506 	struct hist_trigger_data *hist_data;
5507 	struct event_trigger_data *test;
5508 
5509 	list_for_each_entry_rcu(test, &file->triggers, list) {
5510 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5511 			hist_data = test->private_data;
5512 			if (check_var_refs(hist_data))
5513 				return true;
5514 		}
5515 	}
5516 
5517 	return false;
5518 }
5519 
5520 static void hist_unreg_all(struct trace_event_file *file)
5521 {
5522 	struct event_trigger_data *test, *n;
5523 	struct hist_trigger_data *hist_data;
5524 	struct synth_event *se;
5525 	const char *se_name;
5526 
5527 	lockdep_assert_held(&event_mutex);
5528 
5529 	if (hist_file_check_refs(file))
5530 		return;
5531 
5532 	list_for_each_entry_safe(test, n, &file->triggers, list) {
5533 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5534 			hist_data = test->private_data;
5535 			list_del_rcu(&test->list);
5536 			trace_event_trigger_enable_disable(file, 0);
5537 
5538 			se_name = trace_event_name(file->event_call);
5539 			se = find_synth_event(se_name);
5540 			if (se)
5541 				se->ref--;
5542 
5543 			update_cond_flag(file);
5544 			if (hist_data->enable_timestamps)
5545 				tracing_set_time_stamp_abs(file->tr, false);
5546 			if (test->ops->free)
5547 				test->ops->free(test->ops, test);
5548 		}
5549 	}
5550 }
5551 
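/*
 * The 'hist' event command function: split the command into the
 * trigger spec and an optional 'if' filter, parse the trigger
 * attributes, create the hist_trigger_data and trigger data, then
 * either unregister a matching existing trigger ('!hist') or register
 * the new one, create its actions, initialize its map and enable it.
 */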
5552 static int event_hist_trigger_func(struct event_command *cmd_ops,
5553 				   struct trace_event_file *file,
5554 				   char *glob, char *cmd, char *param)
5555 {
5556 	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
5557 	struct event_trigger_data *trigger_data;
5558 	struct hist_trigger_attrs *attrs;
5559 	struct event_trigger_ops *trigger_ops;
5560 	struct hist_trigger_data *hist_data;
5561 	struct synth_event *se;
5562 	const char *se_name;
5563 	bool remove = false;
5564 	char *trigger, *p;
5565 	int ret = 0;
5566 
5567 	lockdep_assert_held(&event_mutex);
5568 
5569 	if (glob && strlen(glob)) {
5570 		last_cmd_set(param);
5571 		hist_err_clear();
5572 	}
5573 
5574 	if (!param)
5575 		return -EINVAL;
5576 
5577 	if (glob[0] == '!')
5578 		remove = true;
5579 
5580 	/*
5581 	 * separate the trigger from the filter (k:v [if filter])
5582 	 * allowing for whitespace in the trigger
5583 	 */
5584 	p = trigger = param;
5585 	do {
5586 		p = strstr(p, "if");
5587 		if (!p)
5588 			break;
5589 		if (p == param)
5590 			return -EINVAL;
5591 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
5592 			p++;
5593 			continue;
5594 		}
5595 		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
5596 			return -EINVAL;
5597 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
5598 			p++;
5599 			continue;
5600 		}
5601 		break;
5602 	} while (p);
5603 
5604 	if (!p)
5605 		param = NULL;
5606 	else {
5607 		*(p - 1) = '\0';
5608 		param = strstrip(p);
5609 		trigger = strstrip(trigger);
5610 	}
5611 
5612 	attrs = parse_hist_trigger_attrs(trigger);
5613 	if (IS_ERR(attrs))
5614 		return PTR_ERR(attrs);
5615 
5616 	if (attrs->map_bits)
5617 		hist_trigger_bits = attrs->map_bits;
5618 
5619 	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
5620 	if (IS_ERR(hist_data)) {
5621 		destroy_hist_trigger_attrs(attrs);
5622 		return PTR_ERR(hist_data);
5623 	}
5624 
5625 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
5626 
5627 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
5628 	if (!trigger_data) {
5629 		ret = -ENOMEM;
5630 		goto out_free;
5631 	}
5632 
5633 	trigger_data->count = -1;
5634 	trigger_data->ops = trigger_ops;
5635 	trigger_data->cmd_ops = cmd_ops;
5636 
5637 	INIT_LIST_HEAD(&trigger_data->list);
5638 	RCU_INIT_POINTER(trigger_data->filter, NULL);
5639 
5640 	trigger_data->private_data = hist_data;
5641 
5642 	/* if param is non-empty, it's supposed to be a filter */
5643 	if (param && cmd_ops->set_filter) {
5644 		ret = cmd_ops->set_filter(param, trigger_data, file);
5645 		if (ret < 0)
5646 			goto out_free;
5647 	}
5648 
5649 	if (remove) {
5650 		if (!have_hist_trigger_match(trigger_data, file))
5651 			goto out_free;
5652 
5653 		if (hist_trigger_check_refs(trigger_data, file)) {
5654 			ret = -EBUSY;
5655 			goto out_free;
5656 		}
5657 
5658 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
5659 		se_name = trace_event_name(file->event_call);
5660 		se = find_synth_event(se_name);
5661 		if (se)
5662 			se->ref--;
5663 		ret = 0;
5664 		goto out_free;
5665 	}
5666 
5667 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
5668 	/*
5669 	 * The above returns on success the # of triggers registered,
5670 	 * but if it didn't register any it returns zero.  Consider no
5671 	 * triggers registered a failure too.
5672 	 */
5673 	if (!ret) {
5674 		if (!(attrs->pause || attrs->cont || attrs->clear))
5675 			ret = -ENOENT;
5676 		goto out_free;
5677 	} else if (ret < 0)
5678 		goto out_free;
5679 
5680 	if (get_named_trigger_data(trigger_data))
5681 		goto enable;
5682 
5683 	if (has_hist_vars(hist_data))
5684 		save_hist_vars(hist_data);
5685 
5686 	ret = create_actions(hist_data, file);
5687 	if (ret)
5688 		goto out_unreg;
5689 
5690 	ret = tracing_map_init(hist_data->map);
5691 	if (ret)
5692 		goto out_unreg;
5693 enable:
5694 	ret = hist_trigger_enable(trigger_data, file);
5695 	if (ret)
5696 		goto out_unreg;
5697 
5698 	se_name = trace_event_name(file->event_call);
5699 	se = find_synth_event(se_name);
5700 	if (se)
5701 		se->ref++;
5702 	/* Just return zero, not the number of registered triggers */
5703 	ret = 0;
5704  out:
5705 	if (ret == 0)
5706 		hist_err_clear();
5707 
5708 	return ret;
5709  out_unreg:
5710 	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
5711  out_free:
5712 	if (cmd_ops->set_filter)
5713 		cmd_ops->set_filter(NULL, trigger_data, NULL);
5714 
5715 	remove_hist_vars(hist_data);
5716 
5717 	kfree(trigger_data);
5718 
5719 	destroy_hist_data(hist_data);
5720 	goto out;
5721 }
5722 
5723 static struct event_command trigger_hist_cmd = {
5724 	.name			= "hist",
5725 	.trigger_type		= ETT_EVENT_HIST,
5726 	.flags			= EVENT_CMD_FL_NEEDS_REC,
5727 	.func			= event_hist_trigger_func,
5728 	.reg			= hist_register_trigger,
5729 	.unreg			= hist_unregister_trigger,
5730 	.unreg_all		= hist_unreg_all,
5731 	.get_trigger_ops	= event_hist_get_trigger_ops,
5732 	.set_filter		= set_trigger_filter,
5733 };
5734 
5735 __init int register_trigger_hist_cmd(void)
5736 {
5737 	int ret;
5738 
5739 	ret = register_event_command(&trigger_hist_cmd);
5740 	WARN_ON(ret < 0);
5741 
5742 	return ret;
5743 }
5744 
5745 static void
5746 hist_enable_trigger(struct event_trigger_data *data, void *rec,
5747 		    struct ring_buffer_event *event)
5748 {
5749 	struct enable_trigger_data *enable_data = data->private_data;
5750 	struct event_trigger_data *test;
5751 
5752 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
5753 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5754 			if (enable_data->enable)
5755 				test->paused = false;
5756 			else
5757 				test->paused = true;
5758 		}
5759 	}
5760 }
5761 
5762 static void
5763 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
5764 			  struct ring_buffer_event *event)
5765 {
5766 	if (!data->count)
5767 		return;
5768 
5769 	if (data->count != -1)
5770 		(data->count)--;
5771 
5772 	hist_enable_trigger(data, rec, event);
5773 }
5774 
5775 static struct event_trigger_ops hist_enable_trigger_ops = {
5776 	.func			= hist_enable_trigger,
5777 	.print			= event_enable_trigger_print,
5778 	.init			= event_trigger_init,
5779 	.free			= event_enable_trigger_free,
5780 };
5781 
5782 static struct event_trigger_ops hist_enable_count_trigger_ops = {
5783 	.func			= hist_enable_count_trigger,
5784 	.print			= event_enable_trigger_print,
5785 	.init			= event_trigger_init,
5786 	.free			= event_enable_trigger_free,
5787 };
5788 
5789 static struct event_trigger_ops hist_disable_trigger_ops = {
5790 	.func			= hist_enable_trigger,
5791 	.print			= event_enable_trigger_print,
5792 	.init			= event_trigger_init,
5793 	.free			= event_enable_trigger_free,
5794 };
5795 
5796 static struct event_trigger_ops hist_disable_count_trigger_ops = {
5797 	.func			= hist_enable_count_trigger,
5798 	.print			= event_enable_trigger_print,
5799 	.init			= event_trigger_init,
5800 	.free			= event_enable_trigger_free,
5801 };
5802 
5803 static struct event_trigger_ops *
5804 hist_enable_get_trigger_ops(char *cmd, char *param)
5805 {
5806 	struct event_trigger_ops *ops;
5807 	bool enable;
5808 
5809 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
5810 
5811 	if (enable)
5812 		ops = param ? &hist_enable_count_trigger_ops :
5813 			&hist_enable_trigger_ops;
5814 	else
5815 		ops = param ? &hist_disable_count_trigger_ops :
5816 			&hist_disable_trigger_ops;
5817 
5818 	return ops;
5819 }
5820 
5821 static void hist_enable_unreg_all(struct trace_event_file *file)
5822 {
5823 	struct event_trigger_data *test, *n;
5824 
5825 	list_for_each_entry_safe(test, n, &file->triggers, list) {
5826 		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
5827 			list_del_rcu(&test->list);
5828 			update_cond_flag(file);
5829 			trace_event_trigger_enable_disable(file, 0);
5830 			if (test->ops->free)
5831 				test->ops->free(test->ops, test);
5832 		}
5833 	}
5834 }
5835 
5836 static struct event_command trigger_hist_enable_cmd = {
5837 	.name			= ENABLE_HIST_STR,
5838 	.trigger_type		= ETT_HIST_ENABLE,
5839 	.func			= event_enable_trigger_func,
5840 	.reg			= event_enable_register_trigger,
5841 	.unreg			= event_enable_unregister_trigger,
5842 	.unreg_all		= hist_enable_unreg_all,
5843 	.get_trigger_ops	= hist_enable_get_trigger_ops,
5844 	.set_filter		= set_trigger_filter,
5845 };
5846 
5847 static struct event_command trigger_hist_disable_cmd = {
5848 	.name			= DISABLE_HIST_STR,
5849 	.trigger_type		= ETT_HIST_ENABLE,
5850 	.func			= event_enable_trigger_func,
5851 	.reg			= event_enable_register_trigger,
5852 	.unreg			= event_enable_unregister_trigger,
5853 	.unreg_all		= hist_enable_unreg_all,
5854 	.get_trigger_ops	= hist_enable_get_trigger_ops,
5855 	.set_filter		= set_trigger_filter,
5856 };
5857 
5858 static __init void unregister_trigger_hist_enable_disable_cmds(void)
5859 {
5860 	unregister_event_command(&trigger_hist_enable_cmd);
5861 	unregister_event_command(&trigger_hist_disable_cmd);
5862 }
5863 
5864 __init int register_trigger_hist_enable_disable_cmds(void)
5865 {
5866 	int ret;
5867 
5868 	ret = register_event_command(&trigger_hist_enable_cmd);
5869 	if (WARN_ON(ret < 0))
5870 		return ret;
5871 	ret = register_event_command(&trigger_hist_disable_cmd);
5872 	if (WARN_ON(ret < 0))
5873 		unregister_trigger_hist_enable_disable_cmds();
5874 
5875 	return ret;
5876 }
5877 
5878 static __init int trace_events_hist_init(void)
5879 {
5880 	struct dentry *entry = NULL;
5881 	struct dentry *d_tracer;
5882 	int err = 0;
5883 
5884 	err = dyn_event_register(&synth_event_ops);
5885 	if (err) {
5886 		pr_warn("Could not register synth_event_ops\n");
5887 		return err;
5888 	}
5889 
5890 	d_tracer = tracing_init_dentry();
5891 	if (IS_ERR(d_tracer)) {
5892 		err = PTR_ERR(d_tracer);
5893 		goto err;
5894 	}
5895 
5896 	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
5897 				    NULL, &synth_events_fops);
5898 	if (!entry) {
5899 		err = -ENODEV;
5900 		goto err;
5901 	}
5902 
5903 	return err;
5904  err:
5905 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
5906 
5907 	return err;
5908 }
5909 
5910 fs_initcall(trace_events_hist_init);
5911