1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24 
25 #define SYNTH_SYSTEM		"synthetic"
26 #define SYNTH_FIELDS_MAX	16
27 
28 #define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
29 
30 #define ERRORS								\
31 	C(NONE,			"No error"),				\
32 	C(DUPLICATE_VAR,	"Variable already defined"),		\
33 	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 	C(TOO_MANY_VARS,	"Too many variables defined"),		\
35 	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
36 	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
38 	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
39 	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
40 	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
41 	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
42 	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
43 	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
44 	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
45 	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
46 	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
47 	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
48 	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
49 	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
50 	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
51 	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
52 	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
53 	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
54 	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
55 	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
56 	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
57 	C(TOO_MANY_PARAMS,	"Too many action params"),		\
58 	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
59 	C(INVALID_PARAM,	"Invalid action param"),		\
60 	C(ACTION_NOT_FOUND,	"No action found"),			\
61 	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
62 	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
64 	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
65 	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
66 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
67 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
68 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
69 	C(FIELD_NOT_FOUND,	"Couldn't find field"),
70 
71 #undef C
72 #define C(a, b)		HIST_ERR_##a
73 
74 enum { ERRORS };
75 
76 #undef C
77 #define C(a, b)		b
78 
79 static const char *err_text[] = { ERRORS };
80 
81 struct hist_field;
82 
83 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
84 				struct tracing_map_elt *elt,
85 				struct ring_buffer_event *rbe,
86 				void *event);
87 
88 #define HIST_FIELD_OPERANDS_MAX	2
89 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
90 #define HIST_ACTIONS_MAX	8
91 
92 enum field_op_id {
93 	FIELD_OP_NONE,
94 	FIELD_OP_PLUS,
95 	FIELD_OP_MINUS,
96 	FIELD_OP_UNARY_MINUS,
97 };
98 
99 /*
100  * A hist_var (histogram variable) contains variable information for
101  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name, e.g. ts0, and is
103  * associated with a given histogram trigger, as specified by
104  * hist_data.  The hist_var idx is the unique index assigned to the
105  * variable by the hist trigger's tracing_map.  The idx is what is
106  * used to set a variable's value and, by a variable reference, to
107  * retrieve it.
108  */
109 struct hist_var {
110 	char				*name;
111 	struct hist_trigger_data	*hist_data;
112 	unsigned int			idx;
113 };
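
/*
 * For illustration (paths abbreviated, syntax per
 * Documentation/trace/histogram.rst), a trigger such as:
 *
 *   echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *           events/sched/sched_waking/trigger
 *
 * defines a variable named ts0: its hist_field gets HIST_FIELD_FL_VAR
 * set and its hist_var holds the name "ts0" together with the
 * tracing_map index this trigger assigned to it.
 */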
114 
115 struct hist_field {
116 	struct ftrace_event_field	*field;
117 	unsigned long			flags;
118 	hist_field_fn_t			fn;
119 	unsigned int			size;
120 	unsigned int			offset;
121 	unsigned int                    is_signed;
122 	const char			*type;
123 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
124 	struct hist_trigger_data	*hist_data;
125 
126 	/*
127 	 * Variable fields contain variable-specific info in var.
128 	 */
129 	struct hist_var			var;
130 	enum field_op_id		operator;
131 	char				*system;
132 	char				*event_name;
133 
134 	/*
135 	 * The name field is used for EXPR and VAR_REF fields.  VAR
136 	 * fields contain the variable name in var.name.
137 	 */
138 	char				*name;
139 
140 	/*
141 	 * When a histogram trigger is hit, if it has any references
142 	 * to variables, the values of those variables are collected
143 	 * into a var_ref_vals array by resolve_var_refs().  The
144 	 * current value of each variable is read from the tracing_map
145 	 * using the hist field's hist_var.idx and entered into the
146 	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
147 	 */
148 	unsigned int			var_ref_idx;
149 	bool                            read_once;
150 };
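
/*
 * Continuing the example above (again per histogram.rst), a reference
 * such as $ts0 in another trigger's expression:
 *
 *   echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> \
 *           events/sched/sched_switch/trigger
 *
 * creates a hist_field with HIST_FIELD_FL_VAR_REF set: var.idx and
 * var.hist_data identify the variable being read, while var_ref_idx is
 * this reference's slot in the var_ref_vals[] array filled in by
 * resolve_var_refs().
 */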
151 
152 static u64 hist_field_none(struct hist_field *field,
153 			   struct tracing_map_elt *elt,
154 			   struct ring_buffer_event *rbe,
155 			   void *event)
156 {
157 	return 0;
158 }
159 
160 static u64 hist_field_counter(struct hist_field *field,
161 			      struct tracing_map_elt *elt,
162 			      struct ring_buffer_event *rbe,
163 			      void *event)
164 {
165 	return 1;
166 }
167 
168 static u64 hist_field_string(struct hist_field *hist_field,
169 			     struct tracing_map_elt *elt,
170 			     struct ring_buffer_event *rbe,
171 			     void *event)
172 {
173 	char *addr = (char *)(event + hist_field->field->offset);
174 
175 	return (u64)(unsigned long)addr;
176 }
177 
178 static u64 hist_field_dynstring(struct hist_field *hist_field,
179 				struct tracing_map_elt *elt,
180 				struct ring_buffer_event *rbe,
181 				void *event)
182 {
183 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
184 	int str_loc = str_item & 0xffff;
185 	char *addr = (char *)(event + str_loc);
186 
187 	return (u64)(unsigned long)addr;
188 }
189 
190 static u64 hist_field_pstring(struct hist_field *hist_field,
191 			      struct tracing_map_elt *elt,
192 			      struct ring_buffer_event *rbe,
193 			      void *event)
194 {
195 	char **addr = (char **)(event + hist_field->field->offset);
196 
197 	return (u64)(unsigned long)*addr;
198 }
199 
200 static u64 hist_field_log2(struct hist_field *hist_field,
201 			   struct tracing_map_elt *elt,
202 			   struct ring_buffer_event *rbe,
203 			   void *event)
204 {
205 	struct hist_field *operand = hist_field->operands[0];
206 
207 	u64 val = operand->fn(operand, elt, rbe, event);
208 
209 	return (u64) ilog2(roundup_pow_of_two(val));
210 }
211 
212 static u64 hist_field_plus(struct hist_field *hist_field,
213 			   struct tracing_map_elt *elt,
214 			   struct ring_buffer_event *rbe,
215 			   void *event)
216 {
217 	struct hist_field *operand1 = hist_field->operands[0];
218 	struct hist_field *operand2 = hist_field->operands[1];
219 
220 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
221 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
222 
223 	return val1 + val2;
224 }
225 
226 static u64 hist_field_minus(struct hist_field *hist_field,
227 			    struct tracing_map_elt *elt,
228 			    struct ring_buffer_event *rbe,
229 			    void *event)
230 {
231 	struct hist_field *operand1 = hist_field->operands[0];
232 	struct hist_field *operand2 = hist_field->operands[1];
233 
234 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
235 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
236 
237 	return val1 - val2;
238 }
239 
240 static u64 hist_field_unary_minus(struct hist_field *hist_field,
241 				  struct tracing_map_elt *elt,
242 				  struct ring_buffer_event *rbe,
243 				  void *event)
244 {
245 	struct hist_field *operand = hist_field->operands[0];
246 
247 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
248 	u64 val = (u64)-sval;
249 
250 	return val;
251 }
252 
253 #define DEFINE_HIST_FIELD_FN(type)					\
254 	static u64 hist_field_##type(struct hist_field *hist_field,	\
255 				     struct tracing_map_elt *elt,	\
256 				     struct ring_buffer_event *rbe,	\
257 				     void *event)			\
258 {									\
259 	type *addr = (type *)(event + hist_field->field->offset);	\
260 									\
261 	return (u64)(unsigned long)*addr;				\
262 }
263 
264 DEFINE_HIST_FIELD_FN(s64);
265 DEFINE_HIST_FIELD_FN(u64);
266 DEFINE_HIST_FIELD_FN(s32);
267 DEFINE_HIST_FIELD_FN(u32);
268 DEFINE_HIST_FIELD_FN(s16);
269 DEFINE_HIST_FIELD_FN(u16);
270 DEFINE_HIST_FIELD_FN(s8);
271 DEFINE_HIST_FIELD_FN(u8);
272 
273 #define for_each_hist_field(i, hist_data)	\
274 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
275 
276 #define for_each_hist_val_field(i, hist_data)	\
277 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
278 
279 #define for_each_hist_key_field(i, hist_data)	\
280 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
281 
282 #define HIST_STACKTRACE_DEPTH	16
283 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
284 #define HIST_STACKTRACE_SKIP	5
285 
286 #define HITCOUNT_IDX		0
287 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
288 
289 enum hist_field_flags {
290 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
291 	HIST_FIELD_FL_KEY		= 1 << 1,
292 	HIST_FIELD_FL_STRING		= 1 << 2,
293 	HIST_FIELD_FL_HEX		= 1 << 3,
294 	HIST_FIELD_FL_SYM		= 1 << 4,
295 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
296 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
297 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
298 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
299 	HIST_FIELD_FL_LOG2		= 1 << 9,
300 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
301 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
302 	HIST_FIELD_FL_VAR		= 1 << 12,
303 	HIST_FIELD_FL_EXPR		= 1 << 13,
304 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
305 	HIST_FIELD_FL_CPU		= 1 << 15,
306 	HIST_FIELD_FL_ALIAS		= 1 << 16,
307 };
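
/*
 * Most of these flags map directly onto the field modifiers a user can
 * attach to a key or value, e.g. (illustrative, see histogram.rst):
 *
 *   keys=call_site.sym-offset    -> HIST_FIELD_FL_SYM_OFFSET
 *   keys=common_pid.execname     -> HIST_FIELD_FL_EXECNAME
 *   keys=bytes_req.log2          -> HIST_FIELD_FL_LOG2
 *   keys=common_timestamp.usecs  -> HIST_FIELD_FL_TIMESTAMP_USECS
 */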
308 
309 struct var_defs {
310 	unsigned int	n_vars;
311 	char		*name[TRACING_MAP_VARS_MAX];
312 	char		*expr[TRACING_MAP_VARS_MAX];
313 };
314 
315 struct hist_trigger_attrs {
316 	char		*keys_str;
317 	char		*vals_str;
318 	char		*sort_key_str;
319 	char		*name;
320 	char		*clock;
321 	bool		pause;
322 	bool		cont;
323 	bool		clear;
324 	bool		ts_in_usecs;
325 	unsigned int	map_bits;
326 
327 	char		*assignment_str[TRACING_MAP_VARS_MAX];
328 	unsigned int	n_assignments;
329 
330 	char		*action_str[HIST_ACTIONS_MAX];
331 	unsigned int	n_actions;
332 
333 	struct var_defs	var_defs;
334 };
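
/*
 * As a rough illustration of how a trigger string maps onto the fields
 * above (syntax per histogram.rst, names made up):
 *
 *   hist:keys=pid,prio:vals=hitcount:sort=hitcount:size=2048:name=foo:clock=global:pause
 *
 * fills in keys_str, vals_str, sort_key_str, map_bits (derived from
 * size=), name, clock and pause.  A 'ts0=common_timestamp.usecs' clause
 * would instead land in assignment_str[]/var_defs, and an 'onmax(...)'
 * clause in action_str[].
 */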
335 
336 struct field_var {
337 	struct hist_field	*var;
338 	struct hist_field	*val;
339 };
340 
341 struct field_var_hist {
342 	struct hist_trigger_data	*hist_data;
343 	char				*cmd;
344 };
345 
346 struct hist_trigger_data {
347 	struct hist_field               *fields[HIST_FIELDS_MAX];
348 	unsigned int			n_vals;
349 	unsigned int			n_keys;
350 	unsigned int			n_fields;
351 	unsigned int			n_vars;
352 	unsigned int			key_size;
353 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
354 	unsigned int			n_sort_keys;
355 	struct trace_event_file		*event_file;
356 	struct hist_trigger_attrs	*attrs;
357 	struct tracing_map		*map;
358 	bool				enable_timestamps;
359 	bool				remove;
360 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
361 	unsigned int			n_var_refs;
362 
363 	struct action_data		*actions[HIST_ACTIONS_MAX];
364 	unsigned int			n_actions;
365 
366 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
367 	unsigned int			n_field_vars;
368 	unsigned int			n_field_var_str;
369 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
370 	unsigned int			n_field_var_hists;
371 
372 	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
373 	unsigned int			n_save_vars;
374 	unsigned int			n_save_var_str;
375 };
376 
377 static int synth_event_create(int argc, const char **argv);
378 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
379 static int synth_event_release(struct dyn_event *ev);
380 static bool synth_event_is_busy(struct dyn_event *ev);
381 static bool synth_event_match(const char *system, const char *event,
382 			int argc, const char **argv, struct dyn_event *ev);
383 
384 static struct dyn_event_operations synth_event_ops = {
385 	.create = synth_event_create,
386 	.show = synth_event_show,
387 	.is_busy = synth_event_is_busy,
388 	.free = synth_event_release,
389 	.match = synth_event_match,
390 };
391 
392 struct synth_field {
393 	char *type;
394 	char *name;
395 	size_t size;
396 	bool is_signed;
397 	bool is_string;
398 };
399 
400 struct synth_event {
401 	struct dyn_event			devent;
402 	int					ref;
403 	char					*name;
404 	struct synth_field			**fields;
405 	unsigned int				n_fields;
406 	unsigned int				n_u64;
407 	struct trace_event_class		class;
408 	struct trace_event_call			call;
409 	struct tracepoint			*tp;
410 };
411 
412 static bool is_synth_event(struct dyn_event *ev)
413 {
414 	return ev->ops == &synth_event_ops;
415 }
416 
417 static struct synth_event *to_synth_event(struct dyn_event *ev)
418 {
419 	return container_of(ev, struct synth_event, devent);
420 }
421 
422 static bool synth_event_is_busy(struct dyn_event *ev)
423 {
424 	struct synth_event *event = to_synth_event(ev);
425 
426 	return event->ref != 0;
427 }
428 
429 static bool synth_event_match(const char *system, const char *event,
430 			int argc, const char **argv, struct dyn_event *ev)
431 {
432 	struct synth_event *sev = to_synth_event(ev);
433 
434 	return strcmp(sev->name, event) == 0 &&
435 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
436 }
437 
438 struct action_data;
439 
440 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
441 			     struct tracing_map_elt *elt, void *rec,
442 			     struct ring_buffer_event *rbe, void *key,
443 			     struct action_data *data, u64 *var_ref_vals);
444 
445 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
446 
447 enum handler_id {
448 	HANDLER_ONMATCH = 1,
449 	HANDLER_ONMAX,
450 	HANDLER_ONCHANGE,
451 };
452 
453 enum action_id {
454 	ACTION_SAVE = 1,
455 	ACTION_TRACE,
456 	ACTION_SNAPSHOT,
457 };
458 
459 struct action_data {
460 	enum handler_id		handler;
461 	enum action_id		action;
462 	char			*action_name;
463 	action_fn_t		fn;
464 
465 	unsigned int		n_params;
466 	char			*params[SYNTH_FIELDS_MAX];
467 
468 	/*
469 	 * When a histogram trigger is hit, the values of any
470 	 * references to variables, including variables being passed
471 	 * as parameters to synthetic events, are collected into a
472 	 * var_ref_vals array.  This var_ref_idx is the index of the
473 	 * first param in the array to be passed to the synthetic
474 	 * event invocation.
475 	 */
476 	unsigned int		var_ref_idx;
477 	struct synth_event	*synth_event;
478 	bool			use_trace_keyword;
479 	char			*synth_event_name;
480 
481 	union {
482 		struct {
483 			char			*event;
484 			char			*event_system;
485 		} match_data;
486 
487 		struct {
488 			/*
489 			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and is used when
491 			 * printing the action.  Because var_ref
492 			 * creation is deferred to create_actions(),
493 			 * we need a per-action way to save it until
494 			 * then, thus var_str.
495 			 */
496 			char			*var_str;
497 
498 			/*
499 			 * var_ref refers to the variable being
			 * tracked, e.g. onmax($var).
501 			 */
502 			struct hist_field	*var_ref;
503 
504 			/*
505 			 * track_var contains the 'invisible' tracking
506 			 * variable created to keep the current
507 			 * e.g. max value.
508 			 */
509 			struct hist_field	*track_var;
510 
511 			check_track_val_fn_t	check_val;
512 			action_fn_t		save_data;
513 		} track_data;
514 	};
515 };
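
/*
 * Illustrative handler/action combinations (event and variable names
 * hypothetical, syntax per histogram.rst):
 *
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)
 *     -> HANDLER_ONMATCH + ACTION_TRACE; match_data holds the system
 *        and event name, synth_event points at wakeup_latency
 *   onmax($wakeup_lat).save(next_comm,prev_pid)
 *     -> HANDLER_ONMAX + ACTION_SAVE; track_data tracks $wakeup_lat
 *   onchange($v).snapshot()
 *     -> HANDLER_ONCHANGE + ACTION_SNAPSHOT
 */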
516 
517 struct track_data {
518 	u64				track_val;
519 	bool				updated;
520 
521 	unsigned int			key_len;
522 	void				*key;
523 	struct tracing_map_elt		elt;
524 
525 	struct action_data		*action_data;
526 	struct hist_trigger_data	*hist_data;
527 };
528 
529 struct hist_elt_data {
530 	char *comm;
531 	u64 *var_ref_vals;
532 	char *field_var_str[SYNTH_FIELDS_MAX];
533 };
534 
535 struct snapshot_context {
536 	struct tracing_map_elt	*elt;
537 	void			*key;
538 };
539 
540 static void track_data_free(struct track_data *track_data)
541 {
542 	struct hist_elt_data *elt_data;
543 
544 	if (!track_data)
545 		return;
546 
547 	kfree(track_data->key);
548 
549 	elt_data = track_data->elt.private_data;
550 	if (elt_data) {
551 		kfree(elt_data->comm);
552 		kfree(elt_data);
553 	}
554 
555 	kfree(track_data);
556 }
557 
558 static struct track_data *track_data_alloc(unsigned int key_len,
559 					   struct action_data *action_data,
560 					   struct hist_trigger_data *hist_data)
561 {
562 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
563 	struct hist_elt_data *elt_data;
564 
565 	if (!data)
566 		return ERR_PTR(-ENOMEM);
567 
568 	data->key = kzalloc(key_len, GFP_KERNEL);
569 	if (!data->key) {
570 		track_data_free(data);
571 		return ERR_PTR(-ENOMEM);
572 	}
573 
574 	data->key_len = key_len;
575 	data->action_data = action_data;
576 	data->hist_data = hist_data;
577 
578 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
579 	if (!elt_data) {
580 		track_data_free(data);
581 		return ERR_PTR(-ENOMEM);
582 	}
583 	data->elt.private_data = elt_data;
584 
585 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
586 	if (!elt_data->comm) {
587 		track_data_free(data);
588 		return ERR_PTR(-ENOMEM);
589 	}
590 
591 	return data;
592 }
593 
594 static char last_cmd[MAX_FILTER_STR_VAL];
595 static char last_cmd_loc[MAX_FILTER_STR_VAL];
596 
597 static int errpos(char *str)
598 {
599 	return err_pos(last_cmd, str);
600 }
601 
602 static void last_cmd_set(struct trace_event_file *file, char *str)
603 {
604 	const char *system = NULL, *name = NULL;
605 	struct trace_event_call *call;
606 
607 	if (!str)
608 		return;
609 
610 	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
611 
612 	if (file) {
613 		call = file->event_call;
614 
615 		system = call->class->system;
616 		if (system) {
617 			name = trace_event_name(call);
618 			if (!name)
619 				system = NULL;
620 		}
621 	}
622 
623 	if (system)
624 		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
625 }
626 
627 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
628 {
629 	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
630 			err_type, err_pos);
631 }
632 
633 static void hist_err_clear(void)
634 {
635 	last_cmd[0] = '\0';
636 	last_cmd_loc[0] = '\0';
637 }
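
/*
 * The net effect of the helpers above: if (for example) a user writes a
 * bad field name in 'hist:keys=pid:ts0=bogus' to a sched event's trigger
 * file, the failure is logged via tracing_log_err() under the location
 * "hist:sched:<event>" built by last_cmd_set(), with errpos() supplying
 * the position of the offending token within the saved command.
 * (Example command is illustrative only.)
 */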
638 
639 struct synth_trace_event {
640 	struct trace_entry	ent;
641 	u64			fields[];
642 };
643 
644 static int synth_event_define_fields(struct trace_event_call *call)
645 {
646 	struct synth_trace_event trace;
647 	int offset = offsetof(typeof(trace), fields);
648 	struct synth_event *event = call->data;
649 	unsigned int i, size, n_u64;
650 	char *name, *type;
651 	bool is_signed;
652 	int ret = 0;
653 
654 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
655 		size = event->fields[i]->size;
656 		is_signed = event->fields[i]->is_signed;
657 		type = event->fields[i]->type;
658 		name = event->fields[i]->name;
659 		ret = trace_define_field(call, type, name, offset, size,
660 					 is_signed, FILTER_OTHER);
661 		if (ret)
662 			break;
663 
664 		if (event->fields[i]->is_string) {
665 			offset += STR_VAR_LEN_MAX;
666 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
667 		} else {
668 			offset += sizeof(u64);
669 			n_u64++;
670 		}
671 	}
672 
673 	event->n_u64 = n_u64;
674 
675 	return ret;
676 }
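
/*
 * The resulting record layout is simply the trace_entry header followed
 * by one u64 slot per numeric field and STR_VAR_LEN_MAX bytes per string
 * field.  For instance, a hypothetical synthetic event declared as
 * 'u64 lat; char comm[16]; pid_t pid' packs into 1 + 4 + 1 = 6 u64s
 * (n_u64 == 6), with the string occupying fields[1..4].
 */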
677 
678 static bool synth_field_signed(char *type)
679 {
680 	if (str_has_prefix(type, "u"))
681 		return false;
682 	if (strcmp(type, "gfp_t") == 0)
683 		return false;
684 
685 	return true;
686 }
687 
688 static int synth_field_is_string(char *type)
689 {
690 	if (strstr(type, "char[") != NULL)
691 		return true;
692 
693 	return false;
694 }
695 
696 static int synth_field_string_size(char *type)
697 {
698 	char buf[4], *end, *start;
699 	unsigned int len;
700 	int size, err;
701 
702 	start = strstr(type, "char[");
703 	if (start == NULL)
704 		return -EINVAL;
705 	start += sizeof("char[") - 1;
706 
707 	end = strchr(type, ']');
708 	if (!end || end < start)
709 		return -EINVAL;
710 
711 	len = end - start;
712 	if (len > 3)
713 		return -EINVAL;
714 
715 	strncpy(buf, start, len);
716 	buf[len] = '\0';
717 
718 	err = kstrtouint(buf, 0, &size);
719 	if (err)
720 		return err;
721 
722 	if (size > STR_VAR_LEN_MAX)
723 		return -EINVAL;
724 
725 	return size;
726 }
727 
728 static int synth_field_size(char *type)
729 {
730 	int size = 0;
731 
732 	if (strcmp(type, "s64") == 0)
733 		size = sizeof(s64);
734 	else if (strcmp(type, "u64") == 0)
735 		size = sizeof(u64);
736 	else if (strcmp(type, "s32") == 0)
737 		size = sizeof(s32);
738 	else if (strcmp(type, "u32") == 0)
739 		size = sizeof(u32);
740 	else if (strcmp(type, "s16") == 0)
741 		size = sizeof(s16);
742 	else if (strcmp(type, "u16") == 0)
743 		size = sizeof(u16);
744 	else if (strcmp(type, "s8") == 0)
745 		size = sizeof(s8);
746 	else if (strcmp(type, "u8") == 0)
747 		size = sizeof(u8);
748 	else if (strcmp(type, "char") == 0)
749 		size = sizeof(char);
750 	else if (strcmp(type, "unsigned char") == 0)
751 		size = sizeof(unsigned char);
752 	else if (strcmp(type, "int") == 0)
753 		size = sizeof(int);
754 	else if (strcmp(type, "unsigned int") == 0)
755 		size = sizeof(unsigned int);
756 	else if (strcmp(type, "long") == 0)
757 		size = sizeof(long);
758 	else if (strcmp(type, "unsigned long") == 0)
759 		size = sizeof(unsigned long);
760 	else if (strcmp(type, "pid_t") == 0)
761 		size = sizeof(pid_t);
762 	else if (strcmp(type, "gfp_t") == 0)
763 		size = sizeof(gfp_t);
764 	else if (synth_field_is_string(type))
765 		size = synth_field_string_size(type);
766 
767 	return size;
768 }
769 
770 static const char *synth_field_fmt(char *type)
771 {
772 	const char *fmt = "%llu";
773 
774 	if (strcmp(type, "s64") == 0)
775 		fmt = "%lld";
776 	else if (strcmp(type, "u64") == 0)
777 		fmt = "%llu";
778 	else if (strcmp(type, "s32") == 0)
779 		fmt = "%d";
780 	else if (strcmp(type, "u32") == 0)
781 		fmt = "%u";
782 	else if (strcmp(type, "s16") == 0)
783 		fmt = "%d";
784 	else if (strcmp(type, "u16") == 0)
785 		fmt = "%u";
786 	else if (strcmp(type, "s8") == 0)
787 		fmt = "%d";
788 	else if (strcmp(type, "u8") == 0)
789 		fmt = "%u";
790 	else if (strcmp(type, "char") == 0)
791 		fmt = "%d";
792 	else if (strcmp(type, "unsigned char") == 0)
793 		fmt = "%u";
794 	else if (strcmp(type, "int") == 0)
795 		fmt = "%d";
796 	else if (strcmp(type, "unsigned int") == 0)
797 		fmt = "%u";
798 	else if (strcmp(type, "long") == 0)
799 		fmt = "%ld";
800 	else if (strcmp(type, "unsigned long") == 0)
801 		fmt = "%lu";
802 	else if (strcmp(type, "pid_t") == 0)
803 		fmt = "%d";
804 	else if (strcmp(type, "gfp_t") == 0)
805 		fmt = "%x";
806 	else if (synth_field_is_string(type))
807 		fmt = "%s";
808 
809 	return fmt;
810 }
811 
812 static enum print_line_t print_synth_event(struct trace_iterator *iter,
813 					   int flags,
814 					   struct trace_event *event)
815 {
816 	struct trace_array *tr = iter->tr;
817 	struct trace_seq *s = &iter->seq;
818 	struct synth_trace_event *entry;
819 	struct synth_event *se;
820 	unsigned int i, n_u64;
821 	char print_fmt[32];
822 	const char *fmt;
823 
824 	entry = (struct synth_trace_event *)iter->ent;
825 	se = container_of(event, struct synth_event, call.event);
826 
827 	trace_seq_printf(s, "%s: ", se->name);
828 
829 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
830 		if (trace_seq_has_overflowed(s))
831 			goto end;
832 
833 		fmt = synth_field_fmt(se->fields[i]->type);
834 
835 		/* parameter types */
836 		if (tr->trace_flags & TRACE_ITER_VERBOSE)
837 			trace_seq_printf(s, "%s ", fmt);
838 
839 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
840 
841 		/* parameter values */
842 		if (se->fields[i]->is_string) {
843 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
844 					 (char *)&entry->fields[n_u64],
845 					 i == se->n_fields - 1 ? "" : " ");
846 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
847 		} else {
848 			struct trace_print_flags __flags[] = {
849 			    __def_gfpflag_names, {-1, NULL} };
850 
851 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
852 					 entry->fields[n_u64],
853 					 i == se->n_fields - 1 ? "" : " ");
854 
855 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
856 				trace_seq_puts(s, " (");
857 				trace_print_flags_seq(s, "|",
858 						      entry->fields[n_u64],
859 						      __flags);
860 				trace_seq_putc(s, ')');
861 			}
862 			n_u64++;
863 		}
864 	}
865 end:
866 	trace_seq_putc(s, '\n');
867 
868 	return trace_handle_return(s);
869 }
870 
871 static struct trace_event_functions synth_event_funcs = {
872 	.trace		= print_synth_event
873 };
874 
875 static notrace void trace_event_raw_event_synth(void *__data,
876 						u64 *var_ref_vals,
877 						unsigned int var_ref_idx)
878 {
879 	struct trace_event_file *trace_file = __data;
880 	struct synth_trace_event *entry;
881 	struct trace_event_buffer fbuffer;
882 	struct ring_buffer *buffer;
883 	struct synth_event *event;
884 	unsigned int i, n_u64;
885 	int fields_size = 0;
886 
887 	event = trace_file->event_call->data;
888 
889 	if (trace_trigger_soft_disabled(trace_file))
890 		return;
891 
892 	fields_size = event->n_u64 * sizeof(u64);
893 
894 	/*
895 	 * Avoid ring buffer recursion detection, as this event
896 	 * is being performed within another event.
897 	 */
898 	buffer = trace_file->tr->trace_buffer.buffer;
899 	ring_buffer_nest_start(buffer);
900 
901 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
902 					   sizeof(*entry) + fields_size);
903 	if (!entry)
904 		goto out;
905 
906 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
907 		if (event->fields[i]->is_string) {
908 			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
909 			char *str_field = (char *)&entry->fields[n_u64];
910 
911 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
912 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
913 		} else {
914 			entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
915 			n_u64++;
916 		}
917 	}
918 
919 	trace_event_buffer_commit(&fbuffer);
920 out:
921 	ring_buffer_nest_end(buffer);
922 }
923 
924 static void free_synth_event_print_fmt(struct trace_event_call *call)
925 {
926 	if (call) {
927 		kfree(call->print_fmt);
928 		call->print_fmt = NULL;
929 	}
930 }
931 
932 static int __set_synth_event_print_fmt(struct synth_event *event,
933 				       char *buf, int len)
934 {
935 	const char *fmt;
936 	int pos = 0;
937 	int i;
938 
939 	/* When len=0, we just calculate the needed length */
940 #define LEN_OR_ZERO (len ? len - pos : 0)
941 
942 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
943 	for (i = 0; i < event->n_fields; i++) {
944 		fmt = synth_field_fmt(event->fields[i]->type);
945 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
946 				event->fields[i]->name, fmt,
947 				i == event->n_fields - 1 ? "" : ", ");
948 	}
949 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
950 
951 	for (i = 0; i < event->n_fields; i++) {
952 		pos += snprintf(buf + pos, LEN_OR_ZERO,
953 				", REC->%s", event->fields[i]->name);
954 	}
955 
956 #undef LEN_OR_ZERO
957 
958 	/* return the length of print_fmt */
959 	return pos;
960 }
961 
962 static int set_synth_event_print_fmt(struct trace_event_call *call)
963 {
964 	struct synth_event *event = call->data;
965 	char *print_fmt;
966 	int len;
967 
968 	/* First: called with 0 length to calculate the needed length */
969 	len = __set_synth_event_print_fmt(event, NULL, 0);
970 
971 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
972 	if (!print_fmt)
973 		return -ENOMEM;
974 
975 	/* Second: actually write the @print_fmt */
976 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
977 	call->print_fmt = print_fmt;
978 
979 	return 0;
980 }
981 
982 static void free_synth_field(struct synth_field *field)
983 {
984 	kfree(field->type);
985 	kfree(field->name);
986 	kfree(field);
987 }
988 
989 static struct synth_field *parse_synth_field(int argc, const char **argv,
990 					     int *consumed)
991 {
992 	struct synth_field *field;
993 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
994 	int len, ret = 0;
995 
996 	if (field_type[0] == ';')
997 		field_type++;
998 
999 	if (!strcmp(field_type, "unsigned")) {
1000 		if (argc < 3)
1001 			return ERR_PTR(-EINVAL);
1002 		prefix = "unsigned ";
1003 		field_type = argv[1];
1004 		field_name = argv[2];
1005 		*consumed = 3;
1006 	} else {
1007 		field_name = argv[1];
1008 		*consumed = 2;
1009 	}
1010 
1011 	field = kzalloc(sizeof(*field), GFP_KERNEL);
1012 	if (!field)
1013 		return ERR_PTR(-ENOMEM);
1014 
1015 	len = strlen(field_name);
1016 	array = strchr(field_name, '[');
1017 	if (array)
1018 		len -= strlen(array);
1019 	else if (field_name[len - 1] == ';')
1020 		len--;
1021 
1022 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1023 	if (!field->name) {
1024 		ret = -ENOMEM;
1025 		goto free;
1026 	}
1027 
1028 	if (field_type[0] == ';')
1029 		field_type++;
1030 	len = strlen(field_type) + 1;
1031 	if (array)
1032 		len += strlen(array);
1033 	if (prefix)
1034 		len += strlen(prefix);
1035 
1036 	field->type = kzalloc(len, GFP_KERNEL);
1037 	if (!field->type) {
1038 		ret = -ENOMEM;
1039 		goto free;
1040 	}
1041 	if (prefix)
1042 		strcat(field->type, prefix);
1043 	strcat(field->type, field_type);
1044 	if (array) {
1045 		strcat(field->type, array);
1046 		if (field->type[len - 1] == ';')
1047 			field->type[len - 1] = '\0';
1048 	}
1049 
1050 	field->size = synth_field_size(field->type);
1051 	if (!field->size) {
1052 		ret = -EINVAL;
1053 		goto free;
1054 	}
1055 
1056 	if (synth_field_is_string(field->type))
1057 		field->is_string = true;
1058 
1059 	field->is_signed = synth_field_signed(field->type);
1060 
1061  out:
1062 	return field;
1063  free:
1064 	free_synth_field(field);
1065 	field = ERR_PTR(ret);
1066 	goto out;
1067 }
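
/*
 * For example (hypothetical field list), the token stream
 * { "char", "comm[16];", "u64", "lat" } is consumed in two calls: the
 * first returns a field named "comm" of type "char[16]" with
 * *consumed == 2, the second a field named "lat" of type "u64".  An
 * "unsigned" prefix consumes a third token, e.g. "unsigned long len;".
 */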
1068 
1069 static void free_synth_tracepoint(struct tracepoint *tp)
1070 {
1071 	if (!tp)
1072 		return;
1073 
1074 	kfree(tp->name);
1075 	kfree(tp);
1076 }
1077 
1078 static struct tracepoint *alloc_synth_tracepoint(char *name)
1079 {
1080 	struct tracepoint *tp;
1081 
1082 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1083 	if (!tp)
1084 		return ERR_PTR(-ENOMEM);
1085 
1086 	tp->name = kstrdup(name, GFP_KERNEL);
1087 	if (!tp->name) {
1088 		kfree(tp);
1089 		return ERR_PTR(-ENOMEM);
1090 	}
1091 
1092 	return tp;
1093 }
1094 
1095 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1096 				    unsigned int var_ref_idx);
1097 
1098 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1099 			       unsigned int var_ref_idx)
1100 {
1101 	struct tracepoint *tp = event->tp;
1102 
1103 	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1104 		struct tracepoint_func *probe_func_ptr;
1105 		synth_probe_func_t probe_func;
1106 		void *__data;
1107 
1108 		if (!(cpu_online(raw_smp_processor_id())))
1109 			return;
1110 
1111 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1112 		if (probe_func_ptr) {
1113 			do {
1114 				probe_func = probe_func_ptr->func;
1115 				__data = probe_func_ptr->data;
1116 				probe_func(__data, var_ref_vals, var_ref_idx);
1117 			} while ((++probe_func_ptr)->func);
1118 		}
1119 	}
1120 }
1121 
1122 static struct synth_event *find_synth_event(const char *name)
1123 {
1124 	struct dyn_event *pos;
1125 	struct synth_event *event;
1126 
1127 	for_each_dyn_event(pos) {
1128 		if (!is_synth_event(pos))
1129 			continue;
1130 		event = to_synth_event(pos);
1131 		if (strcmp(event->name, name) == 0)
1132 			return event;
1133 	}
1134 
1135 	return NULL;
1136 }
1137 
1138 static int register_synth_event(struct synth_event *event)
1139 {
1140 	struct trace_event_call *call = &event->call;
1141 	int ret = 0;
1142 
1143 	event->call.class = &event->class;
1144 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1145 	if (!event->class.system) {
1146 		ret = -ENOMEM;
1147 		goto out;
1148 	}
1149 
1150 	event->tp = alloc_synth_tracepoint(event->name);
1151 	if (IS_ERR(event->tp)) {
1152 		ret = PTR_ERR(event->tp);
1153 		event->tp = NULL;
1154 		goto out;
1155 	}
1156 
1157 	INIT_LIST_HEAD(&call->class->fields);
1158 	call->event.funcs = &synth_event_funcs;
1159 	call->class->define_fields = synth_event_define_fields;
1160 
1161 	ret = register_trace_event(&call->event);
1162 	if (!ret) {
1163 		ret = -ENODEV;
1164 		goto out;
1165 	}
1166 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
1167 	call->class->reg = trace_event_reg;
1168 	call->class->probe = trace_event_raw_event_synth;
1169 	call->data = event;
1170 	call->tp = event->tp;
1171 
1172 	ret = trace_add_event_call(call);
1173 	if (ret) {
1174 		pr_warn("Failed to register synthetic event: %s\n",
1175 			trace_event_name(call));
1176 		goto err;
1177 	}
1178 
1179 	ret = set_synth_event_print_fmt(call);
1180 	if (ret < 0) {
1181 		trace_remove_event_call(call);
1182 		goto err;
1183 	}
1184  out:
1185 	return ret;
1186  err:
1187 	unregister_trace_event(&call->event);
1188 	goto out;
1189 }
1190 
1191 static int unregister_synth_event(struct synth_event *event)
1192 {
1193 	struct trace_event_call *call = &event->call;
1194 	int ret;
1195 
1196 	ret = trace_remove_event_call(call);
1197 
1198 	return ret;
1199 }
1200 
1201 static void free_synth_event(struct synth_event *event)
1202 {
1203 	unsigned int i;
1204 
1205 	if (!event)
1206 		return;
1207 
1208 	for (i = 0; i < event->n_fields; i++)
1209 		free_synth_field(event->fields[i]);
1210 
1211 	kfree(event->fields);
1212 	kfree(event->name);
1213 	kfree(event->class.system);
1214 	free_synth_tracepoint(event->tp);
1215 	free_synth_event_print_fmt(&event->call);
1216 	kfree(event);
1217 }
1218 
1219 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1220 					     struct synth_field **fields)
1221 {
1222 	struct synth_event *event;
1223 	unsigned int i;
1224 
1225 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1226 	if (!event) {
1227 		event = ERR_PTR(-ENOMEM);
1228 		goto out;
1229 	}
1230 
1231 	event->name = kstrdup(name, GFP_KERNEL);
1232 	if (!event->name) {
1233 		kfree(event);
1234 		event = ERR_PTR(-ENOMEM);
1235 		goto out;
1236 	}
1237 
1238 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1239 	if (!event->fields) {
1240 		free_synth_event(event);
1241 		event = ERR_PTR(-ENOMEM);
1242 		goto out;
1243 	}
1244 
1245 	dyn_event_init(&event->devent, &synth_event_ops);
1246 
1247 	for (i = 0; i < n_fields; i++)
1248 		event->fields[i] = fields[i];
1249 
1250 	event->n_fields = n_fields;
1251  out:
1252 	return event;
1253 }
1254 
1255 static void action_trace(struct hist_trigger_data *hist_data,
1256 			 struct tracing_map_elt *elt, void *rec,
1257 			 struct ring_buffer_event *rbe, void *key,
1258 			 struct action_data *data, u64 *var_ref_vals)
1259 {
1260 	struct synth_event *event = data->synth_event;
1261 
1262 	trace_synth(event, var_ref_vals, data->var_ref_idx);
1263 }
1264 
1265 struct hist_var_data {
1266 	struct list_head list;
1267 	struct hist_trigger_data *hist_data;
1268 };
1269 
1270 static int __create_synth_event(int argc, const char *name, const char **argv)
1271 {
1272 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1273 	struct synth_event *event = NULL;
1274 	int i, consumed = 0, n_fields = 0, ret = 0;
1275 
1276 	/*
1277 	 * Argument syntax:
1278 	 *  - Add synthetic event: <event_name> field[;field] ...
1279 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1280 	 *      where 'field' = type field_name
1281 	 */
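
	/*
	 * For example (event and field names illustrative, as in
	 * histogram.rst), writing:
	 *
	 *   wakeup_latency u64 lat; pid_t pid; int prio
	 *
	 * to the synthetic_events file reaches this function with
	 * name == "wakeup_latency" and argv == { "u64", "lat;", "pid_t",
	 * "pid;", "int", "prio" }, since the command is split on
	 * whitespace before it gets here.
	 */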
1282 
1283 	if (name[0] == '\0' || argc < 1)
1284 		return -EINVAL;
1285 
1286 	mutex_lock(&event_mutex);
1287 
1288 	event = find_synth_event(name);
1289 	if (event) {
1290 		ret = -EEXIST;
1291 		goto out;
1292 	}
1293 
1294 	for (i = 0; i < argc - 1; i++) {
1295 		if (strcmp(argv[i], ";") == 0)
1296 			continue;
1297 		if (n_fields == SYNTH_FIELDS_MAX) {
1298 			ret = -EINVAL;
1299 			goto err;
1300 		}
1301 
1302 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1303 		if (IS_ERR(field)) {
1304 			ret = PTR_ERR(field);
1305 			goto err;
1306 		}
1307 		fields[n_fields++] = field;
1308 		i += consumed - 1;
1309 	}
1310 
1311 	if (i < argc && strcmp(argv[i], ";") != 0) {
1312 		ret = -EINVAL;
1313 		goto err;
1314 	}
1315 
1316 	event = alloc_synth_event(name, n_fields, fields);
1317 	if (IS_ERR(event)) {
1318 		ret = PTR_ERR(event);
1319 		event = NULL;
1320 		goto err;
1321 	}
1322 	ret = register_synth_event(event);
1323 	if (!ret)
1324 		dyn_event_add(&event->devent);
1325 	else
1326 		free_synth_event(event);
1327  out:
1328 	mutex_unlock(&event_mutex);
1329 
1330 	return ret;
1331  err:
1332 	for (i = 0; i < n_fields; i++)
1333 		free_synth_field(fields[i]);
1334 
1335 	goto out;
1336 }
1337 
1338 static int create_or_delete_synth_event(int argc, char **argv)
1339 {
1340 	const char *name = argv[0];
1341 	struct synth_event *event = NULL;
1342 	int ret;
1343 
1344 	/* trace_run_command() ensures argc != 0 */
1345 	if (name[0] == '!') {
1346 		mutex_lock(&event_mutex);
1347 		event = find_synth_event(name + 1);
1348 		if (event) {
1349 			if (event->ref)
1350 				ret = -EBUSY;
1351 			else {
1352 				ret = unregister_synth_event(event);
1353 				if (!ret) {
1354 					dyn_event_remove(&event->devent);
1355 					free_synth_event(event);
1356 				}
1357 			}
1358 		} else
1359 			ret = -ENOENT;
1360 		mutex_unlock(&event_mutex);
1361 		return ret;
1362 	}
1363 
1364 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1365 	return ret == -ECANCELED ? -EINVAL : ret;
1366 }
1367 
1368 static int synth_event_create(int argc, const char **argv)
1369 {
1370 	const char *name = argv[0];
1371 	int len;
1372 
1373 	if (name[0] != 's' || name[1] != ':')
1374 		return -ECANCELED;
1375 	name += 2;
1376 
	/* This interface accepts a group name prefix */
1378 	if (strchr(name, '/')) {
1379 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
1380 		if (len == 0)
1381 			return -EINVAL;
1382 		name += len;
1383 	}
1384 	return __create_synth_event(argc - 1, name, argv + 1);
1385 }
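
/*
 * This is the hook invoked for the dynamic_events interface, e.g.
 * (illustrative):
 *
 *   echo 's:wakeup_latency u64 lat; pid_t pid' >> dynamic_events
 *
 * which is equivalent to writing 'wakeup_latency u64 lat; pid_t pid' to
 * synthetic_events; an optional "synthetic/" group prefix after the "s:"
 * is accepted and stripped above.
 */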
1386 
1387 static int synth_event_release(struct dyn_event *ev)
1388 {
1389 	struct synth_event *event = to_synth_event(ev);
1390 	int ret;
1391 
1392 	if (event->ref)
1393 		return -EBUSY;
1394 
1395 	ret = unregister_synth_event(event);
1396 	if (ret)
1397 		return ret;
1398 
1399 	dyn_event_remove(ev);
1400 	free_synth_event(event);
1401 	return 0;
1402 }
1403 
1404 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1405 {
1406 	struct synth_field *field;
1407 	unsigned int i;
1408 
1409 	seq_printf(m, "%s\t", event->name);
1410 
1411 	for (i = 0; i < event->n_fields; i++) {
1412 		field = event->fields[i];
1413 
		/* parameter type and name */
1415 		seq_printf(m, "%s %s%s", field->type, field->name,
1416 			   i == event->n_fields - 1 ? "" : "; ");
1417 	}
1418 
1419 	seq_putc(m, '\n');
1420 
1421 	return 0;
1422 }
1423 
1424 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1425 {
1426 	struct synth_event *event = to_synth_event(ev);
1427 
1428 	seq_printf(m, "s:%s/", event->class.system);
1429 
1430 	return __synth_event_show(m, event);
1431 }
1432 
1433 static int synth_events_seq_show(struct seq_file *m, void *v)
1434 {
1435 	struct dyn_event *ev = v;
1436 
1437 	if (!is_synth_event(ev))
1438 		return 0;
1439 
1440 	return __synth_event_show(m, to_synth_event(ev));
1441 }
1442 
1443 static const struct seq_operations synth_events_seq_op = {
1444 	.start	= dyn_event_seq_start,
1445 	.next	= dyn_event_seq_next,
1446 	.stop	= dyn_event_seq_stop,
1447 	.show	= synth_events_seq_show,
1448 };
1449 
1450 static int synth_events_open(struct inode *inode, struct file *file)
1451 {
1452 	int ret;
1453 
1454 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1455 	if (ret)
1456 		return ret;
1457 
1458 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1459 		ret = dyn_events_release_all(&synth_event_ops);
1460 		if (ret < 0)
1461 			return ret;
1462 	}
1463 
1464 	return seq_open(file, &synth_events_seq_op);
1465 }
1466 
1467 static ssize_t synth_events_write(struct file *file,
1468 				  const char __user *buffer,
1469 				  size_t count, loff_t *ppos)
1470 {
1471 	return trace_parse_run_command(file, buffer, count, ppos,
1472 				       create_or_delete_synth_event);
1473 }
1474 
1475 static const struct file_operations synth_events_fops = {
1476 	.open           = synth_events_open,
1477 	.write		= synth_events_write,
1478 	.read           = seq_read,
1479 	.llseek         = seq_lseek,
1480 	.release        = seq_release,
1481 };
1482 
1483 static u64 hist_field_timestamp(struct hist_field *hist_field,
1484 				struct tracing_map_elt *elt,
1485 				struct ring_buffer_event *rbe,
1486 				void *event)
1487 {
1488 	struct hist_trigger_data *hist_data = hist_field->hist_data;
1489 	struct trace_array *tr = hist_data->event_file->tr;
1490 
1491 	u64 ts = ring_buffer_event_time_stamp(rbe);
1492 
1493 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1494 		ts = ns2usecs(ts);
1495 
1496 	return ts;
1497 }
1498 
1499 static u64 hist_field_cpu(struct hist_field *hist_field,
1500 			  struct tracing_map_elt *elt,
1501 			  struct ring_buffer_event *rbe,
1502 			  void *event)
1503 {
1504 	int cpu = smp_processor_id();
1505 
1506 	return cpu;
1507 }
1508 
1509 /**
1510  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1511  * @hist_field: The VAR_REF field to check
1512  * @var_data: The hist trigger that owns the variable
1513  * @var_idx: The trigger variable identifier
1514  *
1515  * Check the given VAR_REF field to see whether or not it references
1516  * the given variable associated with the given trigger.
1517  *
1518  * Return: The VAR_REF field if it does reference the variable, NULL if not
1519  */
1520 static struct hist_field *
1521 check_field_for_var_ref(struct hist_field *hist_field,
1522 			struct hist_trigger_data *var_data,
1523 			unsigned int var_idx)
1524 {
1525 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1526 
1527 	if (hist_field && hist_field->var.idx == var_idx &&
1528 	    hist_field->var.hist_data == var_data)
1529 		return hist_field;
1530 
1531 	return NULL;
1532 }
1533 
1534 /**
1535  * find_var_ref - Check if a trigger has a reference to a trigger variable
1536  * @hist_data: The hist trigger that might have a reference to the variable
1537  * @var_data: The hist trigger that owns the variable
1538  * @var_idx: The trigger variable identifier
1539  *
1540  * Check the list of var_refs[] on the first hist trigger to see
1541  * whether any of them are references to the variable on the second
1542  * trigger.
1543  *
1544  * Return: The VAR_REF field referencing the variable if so, NULL if not
1545  */
1546 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1547 				       struct hist_trigger_data *var_data,
1548 				       unsigned int var_idx)
1549 {
1550 	struct hist_field *hist_field;
1551 	unsigned int i;
1552 
1553 	for (i = 0; i < hist_data->n_var_refs; i++) {
1554 		hist_field = hist_data->var_refs[i];
1555 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
1556 			return hist_field;
1557 	}
1558 
1559 	return NULL;
1560 }
1561 
1562 /**
1563  * find_any_var_ref - Check if there is a reference to a given trigger variable
1564  * @hist_data: The hist trigger
1565  * @var_idx: The trigger variable identifier
1566  *
1567  * Check to see whether the given variable is currently referenced by
1568  * any other trigger.
1569  *
1570  * The trigger the variable is defined on is explicitly excluded - the
1571  * assumption being that a self-reference doesn't prevent a trigger
1572  * from being removed.
1573  *
1574  * Return: The VAR_REF field referencing the variable if so, NULL if not
1575  */
1576 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1577 					   unsigned int var_idx)
1578 {
1579 	struct trace_array *tr = hist_data->event_file->tr;
1580 	struct hist_field *found = NULL;
1581 	struct hist_var_data *var_data;
1582 
1583 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1584 		if (var_data->hist_data == hist_data)
1585 			continue;
1586 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1587 		if (found)
1588 			break;
1589 	}
1590 
1591 	return found;
1592 }
1593 
1594 /**
1595  * check_var_refs - Check if there is a reference to any of trigger's variables
1596  * @hist_data: The hist trigger
1597  *
1598  * A trigger can define one or more variables.  If any one of them is
1599  * currently referenced by any other trigger, this function will
1600  * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed:
 * if there are any references to the trigger's variables, it cannot be.
1604  *
1605  * Return: True if there is a reference to any of trigger's variables
1606  */
1607 static bool check_var_refs(struct hist_trigger_data *hist_data)
1608 {
1609 	struct hist_field *field;
1610 	bool found = false;
1611 	int i;
1612 
1613 	for_each_hist_field(i, hist_data) {
1614 		field = hist_data->fields[i];
1615 		if (field && field->flags & HIST_FIELD_FL_VAR) {
1616 			if (find_any_var_ref(hist_data, field->var.idx)) {
1617 				found = true;
1618 				break;
1619 			}
1620 		}
1621 	}
1622 
1623 	return found;
1624 }
1625 
1626 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1627 {
1628 	struct trace_array *tr = hist_data->event_file->tr;
1629 	struct hist_var_data *var_data, *found = NULL;
1630 
1631 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1632 		if (var_data->hist_data == hist_data) {
1633 			found = var_data;
1634 			break;
1635 		}
1636 	}
1637 
1638 	return found;
1639 }
1640 
1641 static bool field_has_hist_vars(struct hist_field *hist_field,
1642 				unsigned int level)
1643 {
1644 	int i;
1645 
1646 	if (level > 3)
1647 		return false;
1648 
1649 	if (!hist_field)
1650 		return false;
1651 
1652 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1653 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1654 		return true;
1655 
1656 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1657 		struct hist_field *operand;
1658 
1659 		operand = hist_field->operands[i];
1660 		if (field_has_hist_vars(operand, level + 1))
1661 			return true;
1662 	}
1663 
1664 	return false;
1665 }
1666 
1667 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1668 {
1669 	struct hist_field *hist_field;
1670 	int i;
1671 
1672 	for_each_hist_field(i, hist_data) {
1673 		hist_field = hist_data->fields[i];
1674 		if (field_has_hist_vars(hist_field, 0))
1675 			return true;
1676 	}
1677 
1678 	return false;
1679 }
1680 
1681 static int save_hist_vars(struct hist_trigger_data *hist_data)
1682 {
1683 	struct trace_array *tr = hist_data->event_file->tr;
1684 	struct hist_var_data *var_data;
1685 
1686 	var_data = find_hist_vars(hist_data);
1687 	if (var_data)
1688 		return 0;
1689 
1690 	if (tracing_check_open_get_tr(tr))
1691 		return -ENODEV;
1692 
1693 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1694 	if (!var_data) {
1695 		trace_array_put(tr);
1696 		return -ENOMEM;
1697 	}
1698 
1699 	var_data->hist_data = hist_data;
1700 	list_add(&var_data->list, &tr->hist_vars);
1701 
1702 	return 0;
1703 }
1704 
1705 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1706 {
1707 	struct trace_array *tr = hist_data->event_file->tr;
1708 	struct hist_var_data *var_data;
1709 
1710 	var_data = find_hist_vars(hist_data);
1711 	if (!var_data)
1712 		return;
1713 
1714 	if (WARN_ON(check_var_refs(hist_data)))
1715 		return;
1716 
1717 	list_del(&var_data->list);
1718 
1719 	kfree(var_data);
1720 
1721 	trace_array_put(tr);
1722 }
1723 
1724 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1725 					 const char *var_name)
1726 {
1727 	struct hist_field *hist_field, *found = NULL;
1728 	int i;
1729 
1730 	for_each_hist_field(i, hist_data) {
1731 		hist_field = hist_data->fields[i];
1732 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1733 		    strcmp(hist_field->var.name, var_name) == 0) {
1734 			found = hist_field;
1735 			break;
1736 		}
1737 	}
1738 
1739 	return found;
1740 }
1741 
1742 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1743 				   struct trace_event_file *file,
1744 				   const char *var_name)
1745 {
1746 	struct hist_trigger_data *test_data;
1747 	struct event_trigger_data *test;
1748 	struct hist_field *hist_field;
1749 
1750 	hist_field = find_var_field(hist_data, var_name);
1751 	if (hist_field)
1752 		return hist_field;
1753 
1754 	list_for_each_entry_rcu(test, &file->triggers, list) {
1755 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1756 			test_data = test->private_data;
1757 			hist_field = find_var_field(test_data, var_name);
1758 			if (hist_field)
1759 				return hist_field;
1760 		}
1761 	}
1762 
1763 	return NULL;
1764 }
1765 
1766 static struct trace_event_file *find_var_file(struct trace_array *tr,
1767 					      char *system,
1768 					      char *event_name,
1769 					      char *var_name)
1770 {
1771 	struct hist_trigger_data *var_hist_data;
1772 	struct hist_var_data *var_data;
1773 	struct trace_event_file *file, *found = NULL;
1774 
1775 	if (system)
1776 		return find_event_file(tr, system, event_name);
1777 
1778 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1779 		var_hist_data = var_data->hist_data;
1780 		file = var_hist_data->event_file;
1781 		if (file == found)
1782 			continue;
1783 
1784 		if (find_var_field(var_hist_data, var_name)) {
1785 			if (found) {
1786 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1787 				return NULL;
1788 			}
1789 
1790 			found = file;
1791 		}
1792 	}
1793 
1794 	return found;
1795 }
1796 
1797 static struct hist_field *find_file_var(struct trace_event_file *file,
1798 					const char *var_name)
1799 {
1800 	struct hist_trigger_data *test_data;
1801 	struct event_trigger_data *test;
1802 	struct hist_field *hist_field;
1803 
1804 	list_for_each_entry_rcu(test, &file->triggers, list) {
1805 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1806 			test_data = test->private_data;
1807 			hist_field = find_var_field(test_data, var_name);
1808 			if (hist_field)
1809 				return hist_field;
1810 		}
1811 	}
1812 
1813 	return NULL;
1814 }
1815 
1816 static struct hist_field *
1817 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1818 {
1819 	struct trace_array *tr = hist_data->event_file->tr;
1820 	struct hist_field *hist_field, *found = NULL;
1821 	struct trace_event_file *file;
1822 	unsigned int i;
1823 
1824 	for (i = 0; i < hist_data->n_actions; i++) {
1825 		struct action_data *data = hist_data->actions[i];
1826 
1827 		if (data->handler == HANDLER_ONMATCH) {
1828 			char *system = data->match_data.event_system;
1829 			char *event_name = data->match_data.event;
1830 
1831 			file = find_var_file(tr, system, event_name, var_name);
1832 			if (!file)
1833 				continue;
1834 			hist_field = find_file_var(file, var_name);
1835 			if (hist_field) {
1836 				if (found) {
1837 					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1838 						 errpos(var_name));
1839 					return ERR_PTR(-EINVAL);
1840 				}
1841 
1842 				found = hist_field;
1843 			}
1844 		}
1845 	}
1846 	return found;
1847 }
1848 
1849 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1850 					 char *system,
1851 					 char *event_name,
1852 					 char *var_name)
1853 {
1854 	struct trace_array *tr = hist_data->event_file->tr;
1855 	struct hist_field *hist_field = NULL;
1856 	struct trace_event_file *file;
1857 
1858 	if (!system || !event_name) {
1859 		hist_field = find_match_var(hist_data, var_name);
1860 		if (IS_ERR(hist_field))
1861 			return NULL;
1862 		if (hist_field)
1863 			return hist_field;
1864 	}
1865 
1866 	file = find_var_file(tr, system, event_name, var_name);
1867 	if (!file)
1868 		return NULL;
1869 
1870 	hist_field = find_file_var(file, var_name);
1871 
1872 	return hist_field;
1873 }
1874 
1875 static u64 hist_field_var_ref(struct hist_field *hist_field,
1876 			      struct tracing_map_elt *elt,
1877 			      struct ring_buffer_event *rbe,
1878 			      void *event)
1879 {
1880 	struct hist_elt_data *elt_data;
1881 	u64 var_val = 0;
1882 
1883 	if (WARN_ON_ONCE(!elt))
1884 		return var_val;
1885 
1886 	elt_data = elt->private_data;
1887 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1888 
1889 	return var_val;
1890 }
1891 
1892 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1893 			     u64 *var_ref_vals, bool self)
1894 {
1895 	struct hist_trigger_data *var_data;
1896 	struct tracing_map_elt *var_elt;
1897 	struct hist_field *hist_field;
1898 	unsigned int i, var_idx;
1899 	bool resolved = true;
1900 	u64 var_val = 0;
1901 
1902 	for (i = 0; i < hist_data->n_var_refs; i++) {
1903 		hist_field = hist_data->var_refs[i];
1904 		var_idx = hist_field->var.idx;
1905 		var_data = hist_field->var.hist_data;
1906 
1907 		if (var_data == NULL) {
1908 			resolved = false;
1909 			break;
1910 		}
1911 
1912 		if ((self && var_data != hist_data) ||
1913 		    (!self && var_data == hist_data))
1914 			continue;
1915 
1916 		var_elt = tracing_map_lookup(var_data->map, key);
1917 		if (!var_elt) {
1918 			resolved = false;
1919 			break;
1920 		}
1921 
1922 		if (!tracing_map_var_set(var_elt, var_idx)) {
1923 			resolved = false;
1924 			break;
1925 		}
1926 
1927 		if (self || !hist_field->read_once)
1928 			var_val = tracing_map_read_var(var_elt, var_idx);
1929 		else
1930 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1931 
1932 		var_ref_vals[i] = var_val;
1933 	}
1934 
1935 	return resolved;
1936 }
1937 
1938 static const char *hist_field_name(struct hist_field *field,
1939 				   unsigned int level)
1940 {
1941 	const char *field_name = "";
1942 
1943 	if (level > 1)
1944 		return field_name;
1945 
1946 	if (field->field)
1947 		field_name = field->field->name;
1948 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1949 		 field->flags & HIST_FIELD_FL_ALIAS)
1950 		field_name = hist_field_name(field->operands[0], ++level);
1951 	else if (field->flags & HIST_FIELD_FL_CPU)
1952 		field_name = "cpu";
1953 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1954 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1955 		if (field->system) {
1956 			static char full_name[MAX_FILTER_STR_VAL];
1957 
1958 			strcat(full_name, field->system);
1959 			strcat(full_name, ".");
1960 			strcat(full_name, field->event_name);
1961 			strcat(full_name, ".");
1962 			strcat(full_name, field->name);
1963 			field_name = full_name;
1964 		} else
1965 			field_name = field->name;
1966 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1967 		field_name = "common_timestamp";
1968 
1969 	if (field_name == NULL)
1970 		field_name = "";
1971 
1972 	return field_name;
1973 }
1974 
1975 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1976 {
1977 	hist_field_fn_t fn = NULL;
1978 
1979 	switch (field_size) {
1980 	case 8:
1981 		if (field_is_signed)
1982 			fn = hist_field_s64;
1983 		else
1984 			fn = hist_field_u64;
1985 		break;
1986 	case 4:
1987 		if (field_is_signed)
1988 			fn = hist_field_s32;
1989 		else
1990 			fn = hist_field_u32;
1991 		break;
1992 	case 2:
1993 		if (field_is_signed)
1994 			fn = hist_field_s16;
1995 		else
1996 			fn = hist_field_u16;
1997 		break;
1998 	case 1:
1999 		if (field_is_signed)
2000 			fn = hist_field_s8;
2001 		else
2002 			fn = hist_field_u8;
2003 		break;
2004 	}
2005 
2006 	return fn;
2007 }
2008 
2009 static int parse_map_size(char *str)
2010 {
2011 	unsigned long size, map_bits;
2012 	int ret;
2013 
2014 	strsep(&str, "=");
2015 	if (!str) {
2016 		ret = -EINVAL;
2017 		goto out;
2018 	}
2019 
2020 	ret = kstrtoul(str, 0, &size);
2021 	if (ret)
2022 		goto out;
2023 
2024 	map_bits = ilog2(roundup_pow_of_two(size));
2025 	if (map_bits < TRACING_MAP_BITS_MIN ||
2026 	    map_bits > TRACING_MAP_BITS_MAX)
2027 		ret = -EINVAL;
2028 	else
2029 		ret = map_bits;
2030  out:
2031 	return ret;
2032 }
2033 
2034 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2035 {
2036 	unsigned int i;
2037 
2038 	if (!attrs)
2039 		return;
2040 
2041 	for (i = 0; i < attrs->n_assignments; i++)
2042 		kfree(attrs->assignment_str[i]);
2043 
2044 	for (i = 0; i < attrs->n_actions; i++)
2045 		kfree(attrs->action_str[i]);
2046 
2047 	kfree(attrs->name);
2048 	kfree(attrs->sort_key_str);
2049 	kfree(attrs->keys_str);
2050 	kfree(attrs->vals_str);
2051 	kfree(attrs->clock);
2052 	kfree(attrs);
2053 }
2054 
2055 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2056 {
2057 	int ret = -EINVAL;
2058 
2059 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
2060 		return ret;
2061 
2062 	if ((str_has_prefix(str, "onmatch(")) ||
2063 	    (str_has_prefix(str, "onmax(")) ||
2064 	    (str_has_prefix(str, "onchange("))) {
2065 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2066 		if (!attrs->action_str[attrs->n_actions]) {
2067 			ret = -ENOMEM;
2068 			return ret;
2069 		}
2070 		attrs->n_actions++;
2071 		ret = 0;
2072 	}
2073 	return ret;
2074 }
2075 
2076 static int parse_assignment(struct trace_array *tr,
2077 			    char *str, struct hist_trigger_attrs *attrs)
2078 {
2079 	int ret = 0;
2080 
2081 	if ((str_has_prefix(str, "key=")) ||
2082 	    (str_has_prefix(str, "keys="))) {
2083 		attrs->keys_str = kstrdup(str, GFP_KERNEL);
2084 		if (!attrs->keys_str) {
2085 			ret = -ENOMEM;
2086 			goto out;
2087 		}
2088 	} else if ((str_has_prefix(str, "val=")) ||
2089 		   (str_has_prefix(str, "vals=")) ||
2090 		   (str_has_prefix(str, "values="))) {
2091 		attrs->vals_str = kstrdup(str, GFP_KERNEL);
2092 		if (!attrs->vals_str) {
2093 			ret = -ENOMEM;
2094 			goto out;
2095 		}
2096 	} else if (str_has_prefix(str, "sort=")) {
2097 		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
2098 		if (!attrs->sort_key_str) {
2099 			ret = -ENOMEM;
2100 			goto out;
2101 		}
2102 	} else if (str_has_prefix(str, "name=")) {
2103 		attrs->name = kstrdup(str, GFP_KERNEL);
2104 		if (!attrs->name) {
2105 			ret = -ENOMEM;
2106 			goto out;
2107 		}
2108 	} else if (str_has_prefix(str, "clock=")) {
2109 		strsep(&str, "=");
2110 		if (!str) {
2111 			ret = -EINVAL;
2112 			goto out;
2113 		}
2114 
2115 		str = strstrip(str);
2116 		attrs->clock = kstrdup(str, GFP_KERNEL);
2117 		if (!attrs->clock) {
2118 			ret = -ENOMEM;
2119 			goto out;
2120 		}
2121 	} else if (str_has_prefix(str, "size=")) {
2122 		int map_bits = parse_map_size(str);
2123 
2124 		if (map_bits < 0) {
2125 			ret = map_bits;
2126 			goto out;
2127 		}
2128 		attrs->map_bits = map_bits;
2129 	} else {
2130 		char *assignment;
2131 
2132 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2133 			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2134 			ret = -EINVAL;
2135 			goto out;
2136 		}
2137 
2138 		assignment = kstrdup(str, GFP_KERNEL);
2139 		if (!assignment) {
2140 			ret = -ENOMEM;
2141 			goto out;
2142 		}
2143 
2144 		attrs->assignment_str[attrs->n_assignments++] = assignment;
2145 	}
2146  out:
2147 	return ret;
2148 }
2149 
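/*
 * Split a ':'-separated hist trigger specification into the
 * keys/vals/sort/size/name/clock assignments, the pause/cont/clear
 * keywords, and any action strings.  A rough, illustrative example of
 * the kind of specification handled here:
 *
 *   keys=pid:vals=hitcount:sort=hitcount:size=2048:onmax($lat).save(comm)
 */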
2150 static struct hist_trigger_attrs *
2151 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2152 {
2153 	struct hist_trigger_attrs *attrs;
2154 	int ret = 0;
2155 
2156 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2157 	if (!attrs)
2158 		return ERR_PTR(-ENOMEM);
2159 
2160 	while (trigger_str) {
2161 		char *str = strsep(&trigger_str, ":");
2162 
2163 		if (strchr(str, '=')) {
2164 			ret = parse_assignment(tr, str, attrs);
2165 			if (ret)
2166 				goto free;
2167 		} else if (strcmp(str, "pause") == 0)
2168 			attrs->pause = true;
2169 		else if ((strcmp(str, "cont") == 0) ||
2170 			 (strcmp(str, "continue") == 0))
2171 			attrs->cont = true;
2172 		else if (strcmp(str, "clear") == 0)
2173 			attrs->clear = true;
2174 		else {
2175 			ret = parse_action(str, attrs);
2176 			if (ret)
2177 				goto free;
2178 		}
2179 	}
2180 
2181 	if (!attrs->keys_str) {
2182 		ret = -EINVAL;
2183 		goto free;
2184 	}
2185 
2186 	if (!attrs->clock) {
2187 		attrs->clock = kstrdup("global", GFP_KERNEL);
2188 		if (!attrs->clock) {
2189 			ret = -ENOMEM;
2190 			goto free;
2191 		}
2192 	}
2193 
2194 	return attrs;
2195  free:
2196 	destroy_hist_trigger_attrs(attrs);
2197 
2198 	return ERR_PTR(ret);
2199 }
2200 
2201 static inline void save_comm(char *comm, struct task_struct *task)
2202 {
2203 	if (!task->pid) {
2204 		strcpy(comm, "<idle>");
2205 		return;
2206 	}
2207 
2208 	if (WARN_ON_ONCE(task->pid < 0)) {
2209 		strcpy(comm, "<XXX>");
2210 		return;
2211 	}
2212 
2213 	strscpy(comm, task->comm, TASK_COMM_LEN);
2214 }
2215 
2216 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2217 {
2218 	unsigned int i;
2219 
2220 	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2221 		kfree(elt_data->field_var_str[i]);
2222 
2223 	kfree(elt_data->comm);
2224 	kfree(elt_data);
2225 }
2226 
2227 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2228 {
2229 	struct hist_elt_data *elt_data = elt->private_data;
2230 
2231 	hist_elt_data_free(elt_data);
2232 }
2233 
2234 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2235 {
2236 	struct hist_trigger_data *hist_data = elt->map->private_data;
2237 	unsigned int size = TASK_COMM_LEN;
2238 	struct hist_elt_data *elt_data;
2239 	struct hist_field *key_field;
2240 	unsigned int i, n_str;
2241 
2242 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2243 	if (!elt_data)
2244 		return -ENOMEM;
2245 
2246 	for_each_hist_key_field(i, hist_data) {
2247 		key_field = hist_data->fields[i];
2248 
2249 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2250 			elt_data->comm = kzalloc(size, GFP_KERNEL);
2251 			if (!elt_data->comm) {
2252 				kfree(elt_data);
2253 				return -ENOMEM;
2254 			}
2255 			break;
2256 		}
2257 	}
2258 
2259 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2260 
	/*
	 * field_var_str[] has only SYNTH_FIELDS_MAX entries, but field
	 * variable and save variable strings are counted separately, so
	 * make sure their sum can't overflow the array.
	 */
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

2261 	size = STR_VAR_LEN_MAX;
2262 
2263 	for (i = 0; i < n_str; i++) {
2264 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2265 		if (!elt_data->field_var_str[i]) {
2266 			hist_elt_data_free(elt_data);
2267 			return -ENOMEM;
2268 		}
2269 	}
2270 
2271 	elt->private_data = elt_data;
2272 
2273 	return 0;
2274 }
2275 
2276 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2277 {
2278 	struct hist_elt_data *elt_data = elt->private_data;
2279 
2280 	if (elt_data->comm)
2281 		save_comm(elt_data->comm, current);
2282 }
2283 
2284 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2285 	.elt_alloc	= hist_trigger_elt_data_alloc,
2286 	.elt_free	= hist_trigger_elt_data_free,
2287 	.elt_init	= hist_trigger_elt_data_init,
2288 };
2289 
2290 static const char *get_hist_field_flags(struct hist_field *hist_field)
2291 {
2292 	const char *flags_str = NULL;
2293 
2294 	if (hist_field->flags & HIST_FIELD_FL_HEX)
2295 		flags_str = "hex";
2296 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
2297 		flags_str = "sym";
2298 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2299 		flags_str = "sym-offset";
2300 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2301 		flags_str = "execname";
2302 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2303 		flags_str = "syscall";
2304 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2305 		flags_str = "log2";
2306 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2307 		flags_str = "usecs";
2308 
2309 	return flags_str;
2310 }
2311 
2312 static void expr_field_str(struct hist_field *field, char *expr)
2313 {
2314 	if (field->flags & HIST_FIELD_FL_VAR_REF)
2315 		strcat(expr, "$");
2316 
2317 	strcat(expr, hist_field_name(field, 0));
2318 
2319 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2320 		const char *flags_str = get_hist_field_flags(field);
2321 
2322 		if (flags_str) {
2323 			strcat(expr, ".");
2324 			strcat(expr, flags_str);
2325 		}
2326 	}
2327 }
2328 
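/*
 * Illustrative example: for an expression parsed from something like
 * 'wakeup_lat=common_timestamp.usecs-$ts0', expr_str() yields the
 * string "common_timestamp.usecs-$ts0" - variable references get a
 * leading '$' and other fields get their modifier appended.
 */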
2329 static char *expr_str(struct hist_field *field, unsigned int level)
2330 {
2331 	char *expr;
2332 
2333 	if (level > 1)
2334 		return NULL;
2335 
2336 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2337 	if (!expr)
2338 		return NULL;
2339 
2340 	if (!field->operands[0]) {
2341 		expr_field_str(field, expr);
2342 		return expr;
2343 	}
2344 
2345 	if (field->operator == FIELD_OP_UNARY_MINUS) {
2346 		char *subexpr;
2347 
2348 		strcat(expr, "-(");
2349 		subexpr = expr_str(field->operands[0], ++level);
2350 		if (!subexpr) {
2351 			kfree(expr);
2352 			return NULL;
2353 		}
2354 		strcat(expr, subexpr);
2355 		strcat(expr, ")");
2356 
2357 		kfree(subexpr);
2358 
2359 		return expr;
2360 	}
2361 
2362 	expr_field_str(field->operands[0], expr);
2363 
2364 	switch (field->operator) {
2365 	case FIELD_OP_MINUS:
2366 		strcat(expr, "-");
2367 		break;
2368 	case FIELD_OP_PLUS:
2369 		strcat(expr, "+");
2370 		break;
2371 	default:
2372 		kfree(expr);
2373 		return NULL;
2374 	}
2375 
2376 	expr_field_str(field->operands[1], expr);
2377 
2378 	return expr;
2379 }
2380 
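/*
 * Note that strpbrk() returns the first '+' or '-' in the string and
 * only a leading '-' is treated as unary minus, so expressions are
 * split left to right at the first operator with no further
 * precedence rules.
 */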
2381 static int contains_operator(char *str)
2382 {
2383 	enum field_op_id field_op = FIELD_OP_NONE;
2384 	char *op;
2385 
2386 	op = strpbrk(str, "+-");
2387 	if (!op)
2388 		return FIELD_OP_NONE;
2389 
2390 	switch (*op) {
2391 	case '-':
2392 		if (*str == '-')
2393 			field_op = FIELD_OP_UNARY_MINUS;
2394 		else
2395 			field_op = FIELD_OP_MINUS;
2396 		break;
2397 	case '+':
2398 		field_op = FIELD_OP_PLUS;
2399 		break;
2400 	default:
2401 		break;
2402 	}
2403 
2404 	return field_op;
2405 }
2406 
2407 static void __destroy_hist_field(struct hist_field *hist_field)
2408 {
2409 	kfree(hist_field->var.name);
2410 	kfree(hist_field->name);
2411 	kfree(hist_field->type);
2412 
2413 	kfree(hist_field);
2414 }
2415 
2416 static void destroy_hist_field(struct hist_field *hist_field,
2417 			       unsigned int level)
2418 {
2419 	unsigned int i;
2420 
2421 	if (level > 3)
2422 		return;
2423 
2424 	if (!hist_field)
2425 		return;
2426 
2427 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2428 		return; /* var refs will be destroyed separately */
2429 
2430 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2431 		destroy_hist_field(hist_field->operands[i], level + 1);
2432 
2433 	__destroy_hist_field(hist_field);
2434 }
2435 
2436 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2437 					    struct ftrace_event_field *field,
2438 					    unsigned long flags,
2439 					    char *var_name)
2440 {
2441 	struct hist_field *hist_field;
2442 
2443 	if (field && is_function_field(field))
2444 		return NULL;
2445 
2446 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2447 	if (!hist_field)
2448 		return NULL;
2449 
2450 	hist_field->hist_data = hist_data;
2451 
2452 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2453 		goto out; /* caller will populate */
2454 
2455 	if (flags & HIST_FIELD_FL_VAR_REF) {
2456 		hist_field->fn = hist_field_var_ref;
2457 		goto out;
2458 	}
2459 
2460 	if (flags & HIST_FIELD_FL_HITCOUNT) {
2461 		hist_field->fn = hist_field_counter;
2462 		hist_field->size = sizeof(u64);
2463 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2464 		if (!hist_field->type)
2465 			goto free;
2466 		goto out;
2467 	}
2468 
2469 	if (flags & HIST_FIELD_FL_STACKTRACE) {
2470 		hist_field->fn = hist_field_none;
2471 		goto out;
2472 	}
2473 
2474 	if (flags & HIST_FIELD_FL_LOG2) {
2475 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;

2476 		hist_field->fn = hist_field_log2;
2477 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		if (!hist_field->operands[0])
			goto free;
2478 		hist_field->size = hist_field->operands[0]->size;
2479 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2480 		if (!hist_field->type)
2481 			goto free;
2482 		goto out;
2483 	}
2484 
2485 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2486 		hist_field->fn = hist_field_timestamp;
2487 		hist_field->size = sizeof(u64);
2488 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2489 		if (!hist_field->type)
2490 			goto free;
2491 		goto out;
2492 	}
2493 
2494 	if (flags & HIST_FIELD_FL_CPU) {
2495 		hist_field->fn = hist_field_cpu;
2496 		hist_field->size = sizeof(int);
2497 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2498 		if (!hist_field->type)
2499 			goto free;
2500 		goto out;
2501 	}
2502 
2503 	if (WARN_ON_ONCE(!field))
2504 		goto out;
2505 
2506 	if (is_string_field(field)) {
2507 		flags |= HIST_FIELD_FL_STRING;
2508 
2509 		hist_field->size = MAX_FILTER_STR_VAL;
2510 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2511 		if (!hist_field->type)
2512 			goto free;
2513 
2514 		if (field->filter_type == FILTER_STATIC_STRING)
2515 			hist_field->fn = hist_field_string;
2516 		else if (field->filter_type == FILTER_DYN_STRING)
2517 			hist_field->fn = hist_field_dynstring;
2518 		else
2519 			hist_field->fn = hist_field_pstring;
2520 	} else {
2521 		hist_field->size = field->size;
2522 		hist_field->is_signed = field->is_signed;
2523 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2524 		if (!hist_field->type)
2525 			goto free;
2526 
2527 		hist_field->fn = select_value_fn(field->size,
2528 						 field->is_signed);
2529 		if (!hist_field->fn) {
2530 			destroy_hist_field(hist_field, 0);
2531 			return NULL;
2532 		}
2533 	}
2534  out:
2535 	hist_field->field = field;
2536 	hist_field->flags = flags;
2537 
2538 	if (var_name) {
2539 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2540 		if (!hist_field->var.name)
2541 			goto free;
2542 	}
2543 
2544 	return hist_field;
2545  free:
2546 	destroy_hist_field(hist_field, 0);
2547 	return NULL;
2548 }
2549 
2550 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2551 {
2552 	unsigned int i;
2553 
2554 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2555 		if (hist_data->fields[i]) {
2556 			destroy_hist_field(hist_data->fields[i], 0);
2557 			hist_data->fields[i] = NULL;
2558 		}
2559 	}
2560 
2561 	for (i = 0; i < hist_data->n_var_refs; i++) {
2562 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2563 		__destroy_hist_field(hist_data->var_refs[i]);
2564 		hist_data->var_refs[i] = NULL;
2565 	}
2566 }
2567 
2568 static int init_var_ref(struct hist_field *ref_field,
2569 			struct hist_field *var_field,
2570 			char *system, char *event_name)
2571 {
2572 	int err = 0;
2573 
2574 	ref_field->var.idx = var_field->var.idx;
2575 	ref_field->var.hist_data = var_field->hist_data;
2576 	ref_field->size = var_field->size;
2577 	ref_field->is_signed = var_field->is_signed;
2578 	ref_field->flags |= var_field->flags &
2579 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2580 
2581 	if (system) {
2582 		ref_field->system = kstrdup(system, GFP_KERNEL);
2583 		if (!ref_field->system)
2584 			return -ENOMEM;
2585 	}
2586 
2587 	if (event_name) {
2588 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2589 		if (!ref_field->event_name) {
2590 			err = -ENOMEM;
2591 			goto free;
2592 		}
2593 	}
2594 
2595 	if (var_field->var.name) {
2596 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2597 		if (!ref_field->name) {
2598 			err = -ENOMEM;
2599 			goto free;
2600 		}
2601 	} else if (var_field->name) {
2602 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2603 		if (!ref_field->name) {
2604 			err = -ENOMEM;
2605 			goto free;
2606 		}
2607 	}
2608 
2609 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2610 	if (!ref_field->type) {
2611 		err = -ENOMEM;
2612 		goto free;
2613 	}
2614  out:
2615 	return err;
2616  free:
2617 	kfree(ref_field->system);
2618 	kfree(ref_field->event_name);
2619 	kfree(ref_field->name);
2620 
2621 	goto out;
2622 }
2623 
2624 /**
2625  * create_var_ref - Create a variable reference and attach it to the trigger
2626  * @hist_data: The trigger that will be referencing the variable
2627  * @var_field: The VAR field to create a reference to
2628  * @system: The optional system string
2629  * @event_name: The optional event_name string
2630  *
2631  * Given a variable hist_field, create a VAR_REF hist_field that
2632  * represents a reference to it.
2633  *
2634  * This function also adds the reference to the trigger that
2635  * now references the variable.
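 *
 * As an illustrative example, a reference written as '$wakeup_lat' in
 * an expression or action param resolves to the VAR field created by a
 * 'wakeup_lat=...' assignment; the VAR_REF field created here is also
 * recorded in the trigger's var_refs[] array so it can be resolved at
 * event time.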
2636  *
2637  * Return: The VAR_REF field if successful, NULL if not
2638  */
2639 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2640 					 struct hist_field *var_field,
2641 					 char *system, char *event_name)
2642 {
2643 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2644 	struct hist_field *ref_field;
2645 
2646 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2647 	if (ref_field) {
2648 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2649 			destroy_hist_field(ref_field, 0);
2650 			return NULL;
2651 		}
2652 
2653 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2654 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2655 	}
2656 
2657 	return ref_field;
2658 }
2659 
2660 static bool is_var_ref(char *var_name)
2661 {
2662 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2663 		return false;
2664 
2665 	return true;
2666 }
2667 
2668 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2669 				 char *var_name)
2670 {
2671 	char *name, *field;
2672 	unsigned int i;
2673 
2674 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2675 		name = hist_data->attrs->var_defs.name[i];
2676 
2677 		if (strcmp(var_name, name) == 0) {
2678 			field = hist_data->attrs->var_defs.expr[i];
2679 			if (contains_operator(field) || is_var_ref(field))
2680 				continue;
2681 			return field;
2682 		}
2683 	}
2684 
2685 	return NULL;
2686 }
2687 
2688 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2689 				 char *system, char *event_name,
2690 				 char *var_name)
2691 {
2692 	struct trace_event_call *call;
2693 
2694 	if (system && event_name) {
2695 		call = hist_data->event_file->event_call;
2696 
2697 		if (strcmp(system, call->class->system) != 0)
2698 			return NULL;
2699 
2700 		if (strcmp(event_name, trace_event_name(call)) != 0)
2701 			return NULL;
2702 	}
2703 
2704 	if (!!system != !!event_name)
2705 		return NULL;
2706 
2707 	if (!is_var_ref(var_name))
2708 		return NULL;
2709 
2710 	var_name++;
2711 
2712 	return field_name_from_var(hist_data, var_name);
2713 }
2714 
2715 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2716 					char *system, char *event_name,
2717 					char *var_name)
2718 {
2719 	struct hist_field *var_field = NULL, *ref_field = NULL;
2720 	struct trace_array *tr = hist_data->event_file->tr;
2721 
2722 	if (!is_var_ref(var_name))
2723 		return NULL;
2724 
2725 	var_name++;
2726 
2727 	var_field = find_event_var(hist_data, system, event_name, var_name);
2728 	if (var_field)
2729 		ref_field = create_var_ref(hist_data, var_field,
2730 					   system, event_name);
2731 
2732 	if (!ref_field)
2733 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2734 
2735 	return ref_field;
2736 }
2737 
2738 static struct ftrace_event_field *
2739 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2740 	    char *field_str, unsigned long *flags)
2741 {
2742 	struct ftrace_event_field *field = NULL;
2743 	char *field_name, *modifier, *str;
2744 	struct trace_array *tr = file->tr;
2745 
2746 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2747 	if (!modifier)
2748 		return ERR_PTR(-ENOMEM);
2749 
2750 	field_name = strsep(&modifier, ".");
2751 	if (modifier) {
2752 		if (strcmp(modifier, "hex") == 0)
2753 			*flags |= HIST_FIELD_FL_HEX;
2754 		else if (strcmp(modifier, "sym") == 0)
2755 			*flags |= HIST_FIELD_FL_SYM;
2756 		else if (strcmp(modifier, "sym-offset") == 0)
2757 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2758 		else if ((strcmp(modifier, "execname") == 0) &&
2759 			 (strcmp(field_name, "common_pid") == 0))
2760 			*flags |= HIST_FIELD_FL_EXECNAME;
2761 		else if (strcmp(modifier, "syscall") == 0)
2762 			*flags |= HIST_FIELD_FL_SYSCALL;
2763 		else if (strcmp(modifier, "log2") == 0)
2764 			*flags |= HIST_FIELD_FL_LOG2;
2765 		else if (strcmp(modifier, "usecs") == 0)
2766 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2767 		else {
2768 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2769 			field = ERR_PTR(-EINVAL);
2770 			goto out;
2771 		}
2772 	}
2773 
2774 	if (strcmp(field_name, "common_timestamp") == 0) {
2775 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2776 		hist_data->enable_timestamps = true;
2777 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2778 			hist_data->attrs->ts_in_usecs = true;
2779 	} else if (strcmp(field_name, "cpu") == 0)
2780 		*flags |= HIST_FIELD_FL_CPU;
2781 	else {
2782 		field = trace_find_event_field(file->event_call, field_name);
2783 		if (!field || !field->size) {
2784 			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2785 			field = ERR_PTR(-EINVAL);
2786 			goto out;
2787 		}
2788 	}
2789  out:
2790 	kfree(str);
2791 
2792 	return field;
2793 }
2794 
2795 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2796 				       struct hist_field *var_ref,
2797 				       char *var_name)
2798 {
2799 	struct hist_field *alias = NULL;
2800 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2801 
2802 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2803 	if (!alias)
2804 		return NULL;
2805 
2806 	alias->fn = var_ref->fn;
2807 	alias->operands[0] = var_ref;
2808 
2809 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2810 		destroy_hist_field(alias, 0);
2811 		return NULL;
2812 	}
2813 
2814 	alias->var_ref_idx = var_ref->var_ref_idx;
2815 
2816 	return alias;
2817 }
2818 
2819 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2820 				     struct trace_event_file *file, char *str,
2821 				     unsigned long *flags, char *var_name)
2822 {
2823 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2824 	struct ftrace_event_field *field = NULL;
2825 	struct hist_field *hist_field = NULL;
2826 	int ret = 0;
2827 
2828 	s = strchr(str, '.');
2829 	if (s) {
2830 		s = strchr(++s, '.');
2831 		if (s) {
2832 			ref_system = strsep(&str, ".");
2833 			if (!str) {
2834 				ret = -EINVAL;
2835 				goto out;
2836 			}
2837 			ref_event = strsep(&str, ".");
2838 			if (!str) {
2839 				ret = -EINVAL;
2840 				goto out;
2841 			}
2842 			ref_var = str;
2843 		}
2844 	}
2845 
2846 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2847 	if (!s) {
2848 		hist_field = parse_var_ref(hist_data, ref_system,
2849 					   ref_event, ref_var);
2850 		if (hist_field) {
2851 			if (var_name) {
2852 				hist_field = create_alias(hist_data, hist_field, var_name);
2853 				if (!hist_field) {
2854 					ret = -ENOMEM;
2855 					goto out;
2856 				}
2857 			}
2858 			return hist_field;
2859 		}
2860 	} else
2861 		str = s;
2862 
2863 	field = parse_field(hist_data, file, str, flags);
2864 	if (IS_ERR(field)) {
2865 		ret = PTR_ERR(field);
2866 		goto out;
2867 	}
2868 
2869 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2870 	if (!hist_field) {
2871 		ret = -ENOMEM;
2872 		goto out;
2873 	}
2874 
2875 	return hist_field;
2876  out:
2877 	return ERR_PTR(ret);
2878 }
2879 
2880 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2881 				     struct trace_event_file *file,
2882 				     char *str, unsigned long flags,
2883 				     char *var_name, unsigned int level);
2884 
2885 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2886 				      struct trace_event_file *file,
2887 				      char *str, unsigned long flags,
2888 				      char *var_name, unsigned int level)
2889 {
2890 	struct hist_field *operand1, *expr = NULL;
2891 	unsigned long operand_flags;
2892 	int ret = 0;
2893 	char *s;
2894 
2895 	/* we support only -(xxx) i.e. explicit parens required */
2896 
2897 	if (level > 3) {
2898 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2899 		ret = -EINVAL;
2900 		goto free;
2901 	}
2902 
2903 	str++; /* skip leading '-' */
2904 
2905 	s = strchr(str, '(');
2906 	if (s)
2907 		str++;
2908 	else {
2909 		ret = -EINVAL;
2910 		goto free;
2911 	}
2912 
2913 	s = strrchr(str, ')');
2914 	if (s)
2915 		*s = '\0';
2916 	else {
2917 		ret = -EINVAL; /* no closing ')' */
2918 		goto free;
2919 	}
2920 
2921 	flags |= HIST_FIELD_FL_EXPR;
2922 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2923 	if (!expr) {
2924 		ret = -ENOMEM;
2925 		goto free;
2926 	}
2927 
2928 	operand_flags = 0;
2929 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2930 	if (IS_ERR(operand1)) {
2931 		ret = PTR_ERR(operand1);
2932 		goto free;
2933 	}
2934 
2935 	expr->flags |= operand1->flags &
2936 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2937 	expr->fn = hist_field_unary_minus;
2938 	expr->operands[0] = operand1;
2939 	expr->operator = FIELD_OP_UNARY_MINUS;
2940 	expr->name = expr_str(expr, 0);
2941 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2942 	if (!expr->type) {
2943 		ret = -ENOMEM;
2944 		goto free;
2945 	}
2946 
2947 	return expr;
2948  free:
2949 	destroy_hist_field(expr, 0);
2950 	return ERR_PTR(ret);
2951 }
2952 
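/*
 * Both operands of an expression must use the same timestamp units.
 * For example (illustrative), 'common_timestamp.usecs-$ts0' is only
 * accepted if $ts0 was itself assigned from a .usecs value; otherwise
 * HIST_ERR_TIMESTAMP_MISMATCH is reported.
 */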
2953 static int check_expr_operands(struct trace_array *tr,
2954 			       struct hist_field *operand1,
2955 			       struct hist_field *operand2)
2956 {
2957 	unsigned long operand1_flags = operand1->flags;
2958 	unsigned long operand2_flags = operand2->flags;
2959 
2960 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2961 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2962 		struct hist_field *var;
2963 
2964 		var = find_var_field(operand1->var.hist_data, operand1->name);
2965 		if (!var)
2966 			return -EINVAL;
2967 		operand1_flags = var->flags;
2968 	}
2969 
2970 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2971 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2972 		struct hist_field *var;
2973 
2974 		var = find_var_field(operand2->var.hist_data, operand2->name);
2975 		if (!var)
2976 			return -EINVAL;
2977 		operand2_flags = var->flags;
2978 	}
2979 
2980 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2981 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2982 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2983 		return -EINVAL;
2984 	}
2985 
2986 	return 0;
2987 }
2988 
2989 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2990 				     struct trace_event_file *file,
2991 				     char *str, unsigned long flags,
2992 				     char *var_name, unsigned int level)
2993 {
2994 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2995 	unsigned long operand_flags;
2996 	int field_op, ret = -EINVAL;
2997 	char *sep, *operand1_str;
2998 
2999 	if (level > 3) {
3000 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3001 		return ERR_PTR(-EINVAL);
3002 	}
3003 
3004 	field_op = contains_operator(str);
3005 
3006 	if (field_op == FIELD_OP_NONE)
3007 		return parse_atom(hist_data, file, str, &flags, var_name);
3008 
3009 	if (field_op == FIELD_OP_UNARY_MINUS)
3010 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
3011 
3012 	switch (field_op) {
3013 	case FIELD_OP_MINUS:
3014 		sep = "-";
3015 		break;
3016 	case FIELD_OP_PLUS:
3017 		sep = "+";
3018 		break;
3019 	default:
3020 		goto free;
3021 	}
3022 
3023 	operand1_str = strsep(&str, sep);
3024 	if (!operand1_str || !str)
3025 		goto free;
3026 
3027 	operand_flags = 0;
3028 	operand1 = parse_atom(hist_data, file, operand1_str,
3029 			      &operand_flags, NULL);
3030 	if (IS_ERR(operand1)) {
3031 		ret = PTR_ERR(operand1);
3032 		operand1 = NULL;
3033 		goto free;
3034 	}
3035 
3036 	/* rest of string could be another expression e.g. b+c in a+b+c */
3037 	operand_flags = 0;
3038 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3039 	if (IS_ERR(operand2)) {
3040 		ret = PTR_ERR(operand2);
3041 		operand2 = NULL;
3042 		goto free;
3043 	}
3044 
3045 	ret = check_expr_operands(file->tr, operand1, operand2);
3046 	if (ret)
3047 		goto free;
3048 
3049 	flags |= HIST_FIELD_FL_EXPR;
3050 
3051 	flags |= operand1->flags &
3052 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3053 
3054 	expr = create_hist_field(hist_data, NULL, flags, var_name);
3055 	if (!expr) {
3056 		ret = -ENOMEM;
3057 		goto free;
3058 	}
3059 
3060 	operand1->read_once = true;
3061 	operand2->read_once = true;
3062 
3063 	expr->operands[0] = operand1;
3064 	expr->operands[1] = operand2;
3065 	expr->operator = field_op;
3066 	expr->name = expr_str(expr, 0);
3067 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
3068 	if (!expr->type) {
3069 		ret = -ENOMEM;
3070 		goto free;
3071 	}
3072 
3073 	switch (field_op) {
3074 	case FIELD_OP_MINUS:
3075 		expr->fn = hist_field_minus;
3076 		break;
3077 	case FIELD_OP_PLUS:
3078 		expr->fn = hist_field_plus;
3079 		break;
3080 	default:
3081 		ret = -EINVAL;
3082 		goto free;
3083 	}
3084 
3085 	return expr;
3086  free:
3087 	destroy_hist_field(operand1, 0);
3088 	destroy_hist_field(operand2, 0);
3089 	destroy_hist_field(expr, 0);
3090 
3091 	return ERR_PTR(ret);
3092 }
3093 
3094 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3095 				 struct trace_event_file *file)
3096 {
3097 	struct event_trigger_data *test;
3098 
3099 	list_for_each_entry_rcu(test, &file->triggers, list) {
3100 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3101 			if (test->private_data == hist_data)
3102 				return test->filter_str;
3103 		}
3104 	}
3105 
3106 	return NULL;
3107 }
3108 
3109 static struct event_command trigger_hist_cmd;
3110 static int event_hist_trigger_func(struct event_command *cmd_ops,
3111 				   struct trace_event_file *file,
3112 				   char *glob, char *cmd, char *param);
3113 
3114 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3115 			    struct hist_trigger_data *hist_data,
3116 			    unsigned int n_keys)
3117 {
3118 	struct hist_field *target_hist_field, *hist_field;
3119 	unsigned int n, i, j;
3120 
3121 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
3122 		return false;
3123 
3124 	i = hist_data->n_vals;
3125 	j = target_hist_data->n_vals;
3126 
3127 	for (n = 0; n < n_keys; n++) {
3128 		hist_field = hist_data->fields[i + n];
3129 		target_hist_field = target_hist_data->fields[j + n];
3130 
3131 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
3132 			return false;
3133 		if (hist_field->size != target_hist_field->size)
3134 			return false;
3135 		if (hist_field->is_signed != target_hist_field->is_signed)
3136 			return false;
3137 	}
3138 
3139 	return true;
3140 }
3141 
3142 static struct hist_trigger_data *
3143 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3144 		     struct trace_event_file *file)
3145 {
3146 	struct hist_trigger_data *hist_data;
3147 	struct event_trigger_data *test;
3148 	unsigned int n_keys;
3149 
3150 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3151 
3152 	list_for_each_entry_rcu(test, &file->triggers, list) {
3153 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3154 			hist_data = test->private_data;
3155 
3156 			if (compatible_keys(target_hist_data, hist_data, n_keys))
3157 				return hist_data;
3158 		}
3159 	}
3160 
3161 	return NULL;
3162 }
3163 
3164 static struct trace_event_file *event_file(struct trace_array *tr,
3165 					   char *system, char *event_name)
3166 {
3167 	struct trace_event_file *file;
3168 
3169 	file = __find_event_file(tr, system, event_name);
3170 	if (!file)
3171 		return ERR_PTR(-EINVAL);
3172 
3173 	return file;
3174 }
3175 
3176 static struct hist_field *
3177 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3178 			 char *system, char *event_name, char *field_name)
3179 {
3180 	struct hist_field *event_var;
3181 	char *synthetic_name;
3182 
3183 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3184 	if (!synthetic_name)
3185 		return ERR_PTR(-ENOMEM);
3186 
3187 	strcpy(synthetic_name, "synthetic_");
3188 	strcat(synthetic_name, field_name);
3189 
3190 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3191 
3192 	kfree(synthetic_name);
3193 
3194 	return event_var;
3195 }
3196 
3197 /**
3198  * create_field_var_hist - Automatically create a histogram and var for a field
3199  * @target_hist_data: The target hist trigger
3200  * @subsys_name: Optional subsystem name
3201  * @event_name: Optional event name
3202  * @field_name: The name of the field (and the resulting variable)
3203  *
3204  * Hist trigger actions fetch data from variables, not directly from
3205  * events.  However, for convenience, users are allowed to directly
3206  * specify an event field in an action, which will be automatically
3207  * converted into a variable on their behalf.
3208  *
3209  * If a user specifies a field on an event other than the event the
3210  * histogram is currently being defined on (the target event
3211  * histogram), the only way that can be accomplished is to create a
3212  * new hist trigger and define the field variable on that.
3213  *
3214  * This function creates a new histogram compatible with the target
3215  * event (meaning a histogram with the same key as the target
3216  * histogram), and creates a variable for the specified field, but
3217  * with 'synthetic_' prepended to the variable name in order to avoid
3218  * collision with normal field variables.
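 *
 * As a rough, illustrative example, if an action names a field
 * belonging to sched_switch while the target histogram is on another
 * event, a command equivalent to
 *
 *   keys=<keys of the compatible histogram>:synthetic_<field>=<field>
 *
 * is built below and registered as a new hist trigger on sched_switch,
 * and the resulting 'synthetic_<field>' variable is returned.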
3219  *
3220  * Return: The variable created for the field.
3221  */
3222 static struct hist_field *
3223 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3224 		      char *subsys_name, char *event_name, char *field_name)
3225 {
3226 	struct trace_array *tr = target_hist_data->event_file->tr;
3227 	struct hist_field *event_var = ERR_PTR(-EINVAL);
3228 	struct hist_trigger_data *hist_data;
3229 	unsigned int i, n, first = true;
3230 	struct field_var_hist *var_hist;
3231 	struct trace_event_file *file;
3232 	struct hist_field *key_field;
3233 	char *saved_filter;
3234 	char *cmd;
3235 	int ret;
3236 
3237 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3238 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3239 		return ERR_PTR(-EINVAL);
3240 	}
3241 
3242 	file = event_file(tr, subsys_name, event_name);
3243 
3244 	if (IS_ERR(file)) {
3245 		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3246 		ret = PTR_ERR(file);
3247 		return ERR_PTR(ret);
3248 	}
3249 
3250 	/*
3251 	 * Look for a histogram compatible with the target.  We'll use the
3252 	 * found histogram specification to create a new matching
3253 	 * histogram with our variable on it.  target_hist_data is not
3254 	 * yet a registered histogram so we can't use that.
3255 	 */
3256 	hist_data = find_compatible_hist(target_hist_data, file);
3257 	if (!hist_data) {
3258 		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3259 		return ERR_PTR(-EINVAL);
3260 	}
3261 
3262 	/* See if a synthetic field variable has already been created */
3263 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3264 					     event_name, field_name);
3265 	if (!IS_ERR_OR_NULL(event_var))
3266 		return event_var;
3267 
3268 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3269 	if (!var_hist)
3270 		return ERR_PTR(-ENOMEM);
3271 
3272 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3273 	if (!cmd) {
3274 		kfree(var_hist);
3275 		return ERR_PTR(-ENOMEM);
3276 	}
3277 
3278 	/* Use the same keys as the compatible histogram */
3279 	strcat(cmd, "keys=");
3280 
3281 	for_each_hist_key_field(i, hist_data) {
3282 		key_field = hist_data->fields[i];
3283 		if (!first)
3284 			strcat(cmd, ",");
3285 		strcat(cmd, key_field->field->name);
3286 		first = false;
3287 	}
3288 
3289 	/* Create the synthetic field variable specification */
3290 	strcat(cmd, ":synthetic_");
3291 	strcat(cmd, field_name);
3292 	strcat(cmd, "=");
3293 	strcat(cmd, field_name);
3294 
3295 	/* Use the same filter as the compatible histogram */
3296 	saved_filter = find_trigger_filter(hist_data, file);
3297 	if (saved_filter) {
3298 		strcat(cmd, " if ");
3299 		strcat(cmd, saved_filter);
3300 	}
3301 
3302 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3303 	if (!var_hist->cmd) {
3304 		kfree(cmd);
3305 		kfree(var_hist);
3306 		return ERR_PTR(-ENOMEM);
3307 	}
3308 
3309 	/* Save the compatible histogram information */
3310 	var_hist->hist_data = hist_data;
3311 
3312 	/* Create the new histogram with our variable */
3313 	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3314 				      "", "hist", cmd);
3315 	if (ret) {
3316 		kfree(cmd);
3317 		kfree(var_hist->cmd);
3318 		kfree(var_hist);
3319 		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3320 		return ERR_PTR(ret);
3321 	}
3322 
3323 	kfree(cmd);
3324 
3325 	/* If we can't find the variable, something went wrong */
3326 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3327 					     event_name, field_name);
3328 	if (IS_ERR_OR_NULL(event_var)) {
3329 		kfree(var_hist->cmd);
3330 		kfree(var_hist);
3331 		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3332 		return ERR_PTR(-EINVAL);
3333 	}
3334 
3335 	n = target_hist_data->n_field_var_hists;
3336 	target_hist_data->field_var_hists[n] = var_hist;
3337 	target_hist_data->n_field_var_hists++;
3338 
3339 	return event_var;
3340 }
3341 
3342 static struct hist_field *
3343 find_target_event_var(struct hist_trigger_data *hist_data,
3344 		      char *subsys_name, char *event_name, char *var_name)
3345 {
3346 	struct trace_event_file *file = hist_data->event_file;
3347 	struct hist_field *hist_field = NULL;
3348 
3349 	if (subsys_name) {
3350 		struct trace_event_call *call;
3351 
3352 		if (!event_name)
3353 			return NULL;
3354 
3355 		call = file->event_call;
3356 
3357 		if (strcmp(subsys_name, call->class->system) != 0)
3358 			return NULL;
3359 
3360 		if (strcmp(event_name, trace_event_name(call)) != 0)
3361 			return NULL;
3362 	}
3363 
3364 	hist_field = find_var_field(hist_data, var_name);
3365 
3366 	return hist_field;
3367 }
3368 
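/*
 * Copy the current value of each field variable into the map element.
 * String values are first copied into the element's preallocated
 * field_var_str[] storage, so the variable ends up holding a pointer
 * that remains valid after the originating trace record is gone.
 */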
3369 static inline void __update_field_vars(struct tracing_map_elt *elt,
3370 				       struct ring_buffer_event *rbe,
3371 				       void *rec,
3372 				       struct field_var **field_vars,
3373 				       unsigned int n_field_vars,
3374 				       unsigned int field_var_str_start)
3375 {
3376 	struct hist_elt_data *elt_data = elt->private_data;
3377 	unsigned int i, j, var_idx;
3378 	u64 var_val;
3379 
3380 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3381 		struct field_var *field_var = field_vars[i];
3382 		struct hist_field *var = field_var->var;
3383 		struct hist_field *val = field_var->val;
3384 
3385 		var_val = val->fn(val, elt, rbe, rec);
3386 		var_idx = var->var.idx;
3387 
3388 		if (val->flags & HIST_FIELD_FL_STRING) {
3389 			char *str = elt_data->field_var_str[j++];
3390 			char *val_str = (char *)(uintptr_t)var_val;
3391 
3392 			strscpy(str, val_str, STR_VAR_LEN_MAX);
3393 			var_val = (u64)(uintptr_t)str;
3394 		}
3395 		tracing_map_set_var(elt, var_idx, var_val);
3396 	}
3397 }
3398 
3399 static void update_field_vars(struct hist_trigger_data *hist_data,
3400 			      struct tracing_map_elt *elt,
3401 			      struct ring_buffer_event *rbe,
3402 			      void *rec)
3403 {
3404 	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
3405 			    hist_data->n_field_vars, 0);
3406 }
3407 
3408 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3409 				 struct tracing_map_elt *elt, void *rec,
3410 				 struct ring_buffer_event *rbe, void *key,
3411 				 struct action_data *data, u64 *var_ref_vals)
3412 {
3413 	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
3414 			    hist_data->n_save_vars, hist_data->n_field_var_str);
3415 }
3416 
3417 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3418 				     struct trace_event_file *file,
3419 				     char *name, int size, const char *type)
3420 {
3421 	struct hist_field *var;
3422 	int idx;
3423 
3424 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3425 		var = ERR_PTR(-EINVAL);
3426 		goto out;
3427 	}
3428 
3429 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3430 	if (!var) {
3431 		var = ERR_PTR(-ENOMEM);
3432 		goto out;
3433 	}
3434 
3435 	idx = tracing_map_add_var(hist_data->map);
3436 	if (idx < 0) {
3437 		kfree(var);
3438 		var = ERR_PTR(-EINVAL);
3439 		goto out;
3440 	}
3441 
3442 	var->flags = HIST_FIELD_FL_VAR;
3443 	var->var.idx = idx;
3444 	var->var.hist_data = var->hist_data = hist_data;
3445 	var->size = size;
3446 	var->var.name = kstrdup(name, GFP_KERNEL);
3447 	var->type = kstrdup(type, GFP_KERNEL);
3448 	if (!var->var.name || !var->type) {
3449 		kfree(var->var.name);
3450 		kfree(var->type);
3451 		kfree(var);
3452 		var = ERR_PTR(-ENOMEM);
3453 	}
3454  out:
3455 	return var;
3456 }
3457 
3458 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3459 					  struct trace_event_file *file,
3460 					  char *field_name)
3461 {
3462 	struct hist_field *val = NULL, *var = NULL;
3463 	unsigned long flags = HIST_FIELD_FL_VAR;
3464 	struct trace_array *tr = file->tr;
3465 	struct field_var *field_var;
3466 	int ret = 0;
3467 
3468 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3469 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3470 		ret = -EINVAL;
3471 		goto err;
3472 	}
3473 
3474 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3475 	if (IS_ERR(val)) {
3476 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3477 		ret = PTR_ERR(val);
3478 		goto err;
3479 	}
3480 
3481 	var = create_var(hist_data, file, field_name, val->size, val->type);
3482 	if (IS_ERR(var)) {
3483 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3484 		kfree(val);
3485 		ret = PTR_ERR(var);
3486 		goto err;
3487 	}
3488 
3489 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3490 	if (!field_var) {
3491 		kfree(val);
3492 		kfree(var);
3493 		ret = -ENOMEM;
3494 		goto err;
3495 	}
3496 
3497 	field_var->var = var;
3498 	field_var->val = val;
3499  out:
3500 	return field_var;
3501  err:
3502 	field_var = ERR_PTR(ret);
3503 	goto out;
3504 }
3505 
3506 /**
3507  * create_target_field_var - Automatically create a variable for a field
3508  * @target_hist_data: The target hist trigger
3509  * @subsys_name: Optional subsystem name
3510  * @event_name: Optional event name
3511  * @var_name: The name of the field (and the resulting variable)
3512  *
3513  * Hist trigger actions fetch data from variables, not directly from
3514  * events.  However, for convenience, users are allowed to directly
3515  * specify an event field in an action, which will be automatically
3516  * converted into a variable on their behalf.
3517  *
3518  * This function creates a field variable with the name var_name on
3519  * the hist trigger currently being defined on the target event.  If
3520  * subsys_name and event_name are specified, this function simply
3521  * verifies that they do in fact match the target event subsystem and
3522  * event name.
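 *
 * For example (illustrative), an action such as
 * 'onmax($wakeup_lat).save(next_comm,next_prio)' causes field
 * variables named next_comm and next_prio to be created on the target
 * trigger so the save() handler can later read them as ordinary
 * variables.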
3523  *
3524  * Return: The variable created for the field.
3525  */
3526 static struct field_var *
3527 create_target_field_var(struct hist_trigger_data *target_hist_data,
3528 			char *subsys_name, char *event_name, char *var_name)
3529 {
3530 	struct trace_event_file *file = target_hist_data->event_file;
3531 
3532 	if (subsys_name) {
3533 		struct trace_event_call *call;
3534 
3535 		if (!event_name)
3536 			return NULL;
3537 
3538 		call = file->event_call;
3539 
3540 		if (strcmp(subsys_name, call->class->system) != 0)
3541 			return NULL;
3542 
3543 		if (strcmp(event_name, trace_event_name(call)) != 0)
3544 			return NULL;
3545 	}
3546 
3547 	return create_field_var(target_hist_data, file, var_name);
3548 }
3549 
3550 static bool check_track_val_max(u64 track_val, u64 var_val)
3551 {
3552 	if (var_val <= track_val)
3553 		return false;
3554 
3555 	return true;
3556 }
3557 
3558 static bool check_track_val_changed(u64 track_val, u64 var_val)
3559 {
3560 	if (var_val == track_val)
3561 		return false;
3562 
3563 	return true;
3564 }
3565 
3566 static u64 get_track_val(struct hist_trigger_data *hist_data,
3567 			 struct tracing_map_elt *elt,
3568 			 struct action_data *data)
3569 {
3570 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3571 	u64 track_val;
3572 
3573 	track_val = tracing_map_read_var(elt, track_var_idx);
3574 
3575 	return track_val;
3576 }
3577 
3578 static void save_track_val(struct hist_trigger_data *hist_data,
3579 			   struct tracing_map_elt *elt,
3580 			   struct action_data *data, u64 var_val)
3581 {
3582 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3583 
3584 	tracing_map_set_var(elt, track_var_idx, var_val);
3585 }
3586 
3587 static void save_track_data(struct hist_trigger_data *hist_data,
3588 			    struct tracing_map_elt *elt, void *rec,
3589 			    struct ring_buffer_event *rbe, void *key,
3590 			    struct action_data *data, u64 *var_ref_vals)
3591 {
3592 	if (data->track_data.save_data)
3593 		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3594 }
3595 
3596 static bool check_track_val(struct tracing_map_elt *elt,
3597 			    struct action_data *data,
3598 			    u64 var_val)
3599 {
3600 	struct hist_trigger_data *hist_data;
3601 	u64 track_val;
3602 
3603 	hist_data = data->track_data.track_var->hist_data;
3604 	track_val = get_track_val(hist_data, elt, data);
3605 
3606 	return data->track_data.check_val(track_val, var_val);
3607 }
3608 
3609 #ifdef CONFIG_TRACER_SNAPSHOT
3610 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3611 {
3612 	/* called with tr->max_lock held */
3613 	struct track_data *track_data = tr->cond_snapshot->cond_data;
3614 	struct hist_elt_data *elt_data, *track_elt_data;
3615 	struct snapshot_context *context = cond_data;
3616 	struct action_data *action;
3617 	u64 track_val;
3618 
3619 	if (!track_data)
3620 		return false;
3621 
3622 	action = track_data->action_data;
3623 
3624 	track_val = get_track_val(track_data->hist_data, context->elt,
3625 				  track_data->action_data);
3626 
3627 	if (!action->track_data.check_val(track_data->track_val, track_val))
3628 		return false;
3629 
3630 	track_data->track_val = track_val;
3631 	memcpy(track_data->key, context->key, track_data->key_len);
3632 
3633 	elt_data = context->elt->private_data;
3634 	track_elt_data = track_data->elt.private_data;
3635 	if (elt_data->comm)
3636 		strscpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3637 
3638 	track_data->updated = true;
3639 
3640 	return true;
3641 }
3642 
3643 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3644 				     struct tracing_map_elt *elt, void *rec,
3645 				     struct ring_buffer_event *rbe, void *key,
3646 				     struct action_data *data,
3647 				     u64 *var_ref_vals)
3648 {
3649 	struct trace_event_file *file = hist_data->event_file;
3650 	struct snapshot_context context;
3651 
3652 	context.elt = elt;
3653 	context.key = key;
3654 
3655 	tracing_snapshot_cond(file->tr, &context);
3656 }
3657 
3658 static void hist_trigger_print_key(struct seq_file *m,
3659 				   struct hist_trigger_data *hist_data,
3660 				   void *key,
3661 				   struct tracing_map_elt *elt);
3662 
3663 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3664 {
3665 	unsigned int i;
3666 
3667 	if (!hist_data->n_actions)
3668 		return NULL;
3669 
3670 	for (i = 0; i < hist_data->n_actions; i++) {
3671 		struct action_data *data = hist_data->actions[i];
3672 
3673 		if (data->action == ACTION_SNAPSHOT)
3674 			return data;
3675 	}
3676 
3677 	return NULL;
3678 }
3679 
3680 static void track_data_snapshot_print(struct seq_file *m,
3681 				      struct hist_trigger_data *hist_data)
3682 {
3683 	struct trace_event_file *file = hist_data->event_file;
3684 	struct track_data *track_data;
3685 	struct action_data *action;
3686 
3687 	track_data = tracing_cond_snapshot_data(file->tr);
3688 	if (!track_data)
3689 		return;
3690 
3691 	if (!track_data->updated)
3692 		return;
3693 
3694 	action = snapshot_action(hist_data);
3695 	if (!action)
3696 		return;
3697 
3698 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
3699 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3700 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3701 		   action->track_data.var_str, track_data->track_val);
3702 
3703 	seq_puts(m, "\n\ttriggered by event with key: ");
3704 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3705 	seq_putc(m, '\n');
3706 }
3707 #else
3708 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3709 {
3710 	return false;
3711 }
3712 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3713 				     struct tracing_map_elt *elt, void *rec,
3714 				     struct ring_buffer_event *rbe, void *key,
3715 				     struct action_data *data,
3716 				     u64 *var_ref_vals) {}
3717 static void track_data_snapshot_print(struct seq_file *m,
3718 				      struct hist_trigger_data *hist_data) {}
3719 #endif /* CONFIG_TRACER_SNAPSHOT */
3720 
3721 static void track_data_print(struct seq_file *m,
3722 			     struct hist_trigger_data *hist_data,
3723 			     struct tracing_map_elt *elt,
3724 			     struct action_data *data)
3725 {
3726 	u64 track_val = get_track_val(hist_data, elt, data);
3727 	unsigned int i, save_var_idx;
3728 
3729 	if (data->handler == HANDLER_ONMAX)
3730 		seq_printf(m, "\n\tmax: %10llu", track_val);
3731 	else if (data->handler == HANDLER_ONCHANGE)
3732 		seq_printf(m, "\n\tchanged: %10llu", track_val);
3733 
3734 	if (data->action == ACTION_SNAPSHOT)
3735 		return;
3736 
3737 	for (i = 0; i < hist_data->n_save_vars; i++) {
3738 		struct hist_field *save_val = hist_data->save_vars[i]->val;
3739 		struct hist_field *save_var = hist_data->save_vars[i]->var;
3740 		u64 val;
3741 
3742 		save_var_idx = save_var->var.idx;
3743 
3744 		val = tracing_map_read_var(elt, save_var_idx);
3745 
3746 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3747 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3748 				   (char *)(uintptr_t)(val));
3749 		} else
3750 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3751 	}
3752 }
3753 
3754 static void ontrack_action(struct hist_trigger_data *hist_data,
3755 			   struct tracing_map_elt *elt, void *rec,
3756 			   struct ring_buffer_event *rbe, void *key,
3757 			   struct action_data *data, u64 *var_ref_vals)
3758 {
3759 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3760 
3761 	if (check_track_val(elt, data, var_val)) {
3762 		save_track_val(hist_data, elt, data, var_val);
3763 		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3764 	}
3765 }
3766 
3767 static void action_data_destroy(struct action_data *data)
3768 {
3769 	unsigned int i;
3770 
3771 	lockdep_assert_held(&event_mutex);
3772 
3773 	kfree(data->action_name);
3774 
3775 	for (i = 0; i < data->n_params; i++)
3776 		kfree(data->params[i]);
3777 
3778 	if (data->synth_event)
3779 		data->synth_event->ref--;
3780 
3781 	kfree(data->synth_event_name);
3782 
3783 	kfree(data);
3784 }
3785 
3786 static void track_data_destroy(struct hist_trigger_data *hist_data,
3787 			       struct action_data *data)
3788 {
3789 	struct trace_event_file *file = hist_data->event_file;
3790 
3791 	destroy_hist_field(data->track_data.track_var, 0);
3792 
3793 	if (data->action == ACTION_SNAPSHOT) {
3794 		struct track_data *track_data;
3795 
3796 		track_data = tracing_cond_snapshot_data(file->tr);
3797 		if (track_data && track_data->hist_data == hist_data) {
3798 			tracing_snapshot_cond_disable(file->tr);
3799 			track_data_free(track_data);
3800 		}
3801 	}
3802 
3803 	kfree(data->track_data.var_str);
3804 
3805 	action_data_destroy(data);
3806 }
3807 
3808 static int action_create(struct hist_trigger_data *hist_data,
3809 			 struct action_data *data);
3810 
3811 static int track_data_create(struct hist_trigger_data *hist_data,
3812 			     struct action_data *data)
3813 {
3814 	struct hist_field *var_field, *ref_field, *track_var = NULL;
3815 	struct trace_event_file *file = hist_data->event_file;
3816 	struct trace_array *tr = file->tr;
3817 	char *track_data_var_str;
3818 	int ret = 0;
3819 
3820 	track_data_var_str = data->track_data.var_str;
3821 	if (track_data_var_str[0] != '$') {
3822 		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3823 		return -EINVAL;
3824 	}
3825 	track_data_var_str++;
3826 
3827 	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3828 	if (!var_field) {
3829 		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3830 		return -EINVAL;
3831 	}
3832 
3833 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3834 	if (!ref_field)
3835 		return -ENOMEM;
3836 
3837 	data->track_data.var_ref = ref_field;
3838 
3839 	if (data->handler == HANDLER_ONMAX)
3840 		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3841 	if (IS_ERR(track_var)) {
3842 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3843 		ret = PTR_ERR(track_var);
3844 		goto out;
3845 	}
3846 
3847 	if (data->handler == HANDLER_ONCHANGE)
3848 		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3849 	if (IS_ERR(track_var)) {
3850 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3851 		ret = PTR_ERR(track_var);
3852 		goto out;
3853 	}
3854 	data->track_data.track_var = track_var;
3855 
3856 	ret = action_create(hist_data, data);
3857  out:
3858 	return ret;
3859 }
3860 
3861 static int parse_action_params(struct trace_array *tr, char *params,
3862 			       struct action_data *data)
3863 {
3864 	char *param, *saved_param;
3865 	bool first_param = true;
3866 	int ret = 0;
3867 
3868 	while (params) {
3869 		if (data->n_params >= SYNTH_FIELDS_MAX) {
3870 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
3871 			goto out;
3872 		}
3873 
3874 		param = strsep(&params, ",");
3875 		if (!param) {
3876 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3877 			ret = -EINVAL;
3878 			goto out;
3879 		}
3880 
3881 		param = strstrip(param);
3882 		if (strlen(param) < 2) {
3883 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3884 			ret = -EINVAL;
3885 			goto out;
3886 		}
3887 
3888 		saved_param = kstrdup(param, GFP_KERNEL);
3889 		if (!saved_param) {
3890 			ret = -ENOMEM;
3891 			goto out;
3892 		}
3893 
3894 		if (first_param && data->use_trace_keyword) {
3895 			data->synth_event_name = saved_param;
3896 			first_param = false;
3897 			continue;
3898 		}
3899 		first_param = false;
3900 
3901 		data->params[data->n_params++] = saved_param;
3902 	}
3903  out:
3904 	return ret;
3905 }
3906 
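/*
 * Parse the action suffix of a handler, i.e. the text following the
 * closing paren of onmax()/onchange()/onmatch().  A few rough,
 * illustrative forms of what @str may contain here:
 *
 *   .save(field1,field2)        - save fields when the handler fires
 *   .snapshot()                 - take a snapshot when the handler fires
 *   .trace(synth_event,params)  - generate a synthetic event
 */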
3907 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3908 			enum handler_id handler)
3909 {
3910 	char *action_name;
3911 	int ret = 0;
3912 
3913 	strsep(&str, ".");
3914 	if (!str) {
3915 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3916 		ret = -EINVAL;
3917 		goto out;
3918 	}
3919 
3920 	action_name = strsep(&str, "(");
3921 	if (!action_name || !str) {
3922 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3923 		ret = -EINVAL;
3924 		goto out;
3925 	}
3926 
3927 	if (str_has_prefix(action_name, "save")) {
3928 		char *params = strsep(&str, ")");
3929 
3930 		if (!params) {
3931 			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3932 			ret = -EINVAL;
3933 			goto out;
3934 		}
3935 
3936 		ret = parse_action_params(tr, params, data);
3937 		if (ret)
3938 			goto out;
3939 
3940 		if (handler == HANDLER_ONMAX)
3941 			data->track_data.check_val = check_track_val_max;
3942 		else if (handler == HANDLER_ONCHANGE)
3943 			data->track_data.check_val = check_track_val_changed;
3944 		else {
3945 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3946 			ret = -EINVAL;
3947 			goto out;
3948 		}
3949 
3950 		data->track_data.save_data = save_track_data_vars;
3951 		data->fn = ontrack_action;
3952 		data->action = ACTION_SAVE;
3953 	} else if (str_has_prefix(action_name, "snapshot")) {
3954 		char *params = strsep(&str, ")");
3955 
3956 		if (!str) {
3957 			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3958 			ret = -EINVAL;
3959 			goto out;
3960 		}
3961 
3962 		if (handler == HANDLER_ONMAX)
3963 			data->track_data.check_val = check_track_val_max;
3964 		else if (handler == HANDLER_ONCHANGE)
3965 			data->track_data.check_val = check_track_val_changed;
3966 		else {
3967 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3968 			ret = -EINVAL;
3969 			goto out;
3970 		}
3971 
3972 		data->track_data.save_data = save_track_data_snapshot;
3973 		data->fn = ontrack_action;
3974 		data->action = ACTION_SNAPSHOT;
3975 	} else {
3976 		char *params = strsep(&str, ")");
3977 
3978 		if (str_has_prefix(action_name, "trace"))
3979 			data->use_trace_keyword = true;
3980 
3981 		if (params) {
3982 			ret = parse_action_params(tr, params, data);
3983 			if (ret)
3984 				goto out;
3985 		}
3986 
3987 		if (handler == HANDLER_ONMAX)
3988 			data->track_data.check_val = check_track_val_max;
3989 		else if (handler == HANDLER_ONCHANGE)
3990 			data->track_data.check_val = check_track_val_changed;
3991 
3992 		if (handler != HANDLER_ONMATCH) {
3993 			data->track_data.save_data = action_trace;
3994 			data->fn = ontrack_action;
3995 		} else
3996 			data->fn = action_trace;
3997 
3998 		data->action = ACTION_TRACE;
3999 	}
4000 
4001 	data->action_name = kstrdup(action_name, GFP_KERNEL);
4002 	if (!data->action_name) {
4003 		ret = -ENOMEM;
4004 		goto out;
4005 	}
4006 
4007 	data->handler = handler;
4008  out:
4009 	return ret;
4010 }
4011 
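/*
 * Parse an onmax(var).action(...) or onchange(var).action(...) string
 * into a newly allocated action_data.  The tracked variable name is
 * only saved as a string here; the variable itself is resolved later,
 * when the action is created (track_data_create()).
 */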
4012 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4013 					    char *str, enum handler_id handler)
4014 {
4015 	struct action_data *data;
4016 	int ret = -EINVAL;
4017 	char *var_str;
4018 
4019 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4020 	if (!data)
4021 		return ERR_PTR(-ENOMEM);
4022 
4023 	var_str = strsep(&str, ")");
4024 	if (!var_str || !str) {
4025 		ret = -EINVAL;
4026 		goto free;
4027 	}
4028 
4029 	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4030 	if (!data->track_data.var_str) {
4031 		ret = -ENOMEM;
4032 		goto free;
4033 	}
4034 
4035 	ret = action_parse(hist_data->event_file->tr, str, data, handler);
4036 	if (ret)
4037 		goto free;
4038  out:
4039 	return data;
4040  free:
4041 	track_data_destroy(hist_data, data);
4042 	data = ERR_PTR(ret);
4043 	goto out;
4044 }
4045 
4046 static void onmatch_destroy(struct action_data *data)
4047 {
4048 	kfree(data->match_data.event);
4049 	kfree(data->match_data.event_system);
4050 
4051 	action_data_destroy(data);
4052 }
4053 
4054 static void destroy_field_var(struct field_var *field_var)
4055 {
4056 	if (!field_var)
4057 		return;
4058 
4059 	destroy_hist_field(field_var->var, 0);
4060 	destroy_hist_field(field_var->val, 0);
4061 
4062 	kfree(field_var);
4063 }
4064 
4065 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4066 {
4067 	unsigned int i;
4068 
4069 	for (i = 0; i < hist_data->n_field_vars; i++)
4070 		destroy_field_var(hist_data->field_vars[i]);
4071 }
4072 
4073 static void save_field_var(struct hist_trigger_data *hist_data,
4074 			   struct field_var *field_var)
4075 {
4076 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4077 
4078 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
4079 		hist_data->n_field_var_str++;
4080 }
4081 
4083 static int check_synth_field(struct synth_event *event,
4084 			     struct hist_field *hist_field,
4085 			     unsigned int field_pos)
4086 {
4087 	struct synth_field *field;
4088 
4089 	if (field_pos >= event->n_fields)
4090 		return -EINVAL;
4091 
4092 	field = event->fields[field_pos];
4093 
4094 	if (strcmp(field->type, hist_field->type) != 0)
4095 		return -EINVAL;
4096 
4097 	return 0;
4098 }
4099 
4100 static struct hist_field *
4101 trace_action_find_var(struct hist_trigger_data *hist_data,
4102 		      struct action_data *data,
4103 		      char *system, char *event, char *var)
4104 {
4105 	struct trace_array *tr = hist_data->event_file->tr;
4106 	struct hist_field *hist_field;
4107 
4108 	var++; /* skip '$' */
4109 
4110 	hist_field = find_target_event_var(hist_data, system, event, var);
4111 	if (!hist_field) {
4112 		if (!system && data->handler == HANDLER_ONMATCH) {
4113 			system = data->match_data.event_system;
4114 			event = data->match_data.event;
4115 		}
4116 
4117 		hist_field = find_event_var(hist_data, system, event, var);
4118 	}
4119 
4120 	if (!hist_field)
4121 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4122 
4123 	return hist_field;
4124 }
4125 
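/*
 * Create a variable for an event field named as a synthetic event
 * param: either on the target event itself or, failing that, on the
 * event named in the onmatch() specification, which may require
 * creating a separate hist trigger on that event.
 */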
4126 static struct hist_field *
4127 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4128 			      struct action_data *data, char *system,
4129 			      char *event, char *var)
4130 {
4131 	struct hist_field *hist_field = NULL;
4132 	struct field_var *field_var;
4133 
	/*
	 * First try to create a field var on the target event (the
	 * event currently being defined).  This will create a variable
	 * for unqualified fields on the target event, or, if qualified,
	 * for target fields whose qualified names match the target.
	 */
4140 	field_var = create_target_field_var(hist_data, system, event, var);
4141 
4142 	if (field_var && !IS_ERR(field_var)) {
4143 		save_field_var(hist_data, field_var);
4144 		hist_field = field_var->var;
4145 	} else {
4146 		field_var = NULL;
4147 		/*
		 * If no explicit system.event is specified, default to
4149 		 * looking for fields on the onmatch(system.event.xxx)
4150 		 * event.
4151 		 */
4152 		if (!system && data->handler == HANDLER_ONMATCH) {
4153 			system = data->match_data.event_system;
4154 			event = data->match_data.event;
4155 		}
4156 
4157 		/*
4158 		 * At this point, we're looking at a field on another
4159 		 * event.  Because we can't modify a hist trigger on
4160 		 * another event to add a variable for a field, we need
4161 		 * to create a new trigger on that event and create the
4162 		 * variable at the same time.
4163 		 */
4164 		hist_field = create_field_var_hist(hist_data, system, event, var);
4165 		if (IS_ERR(hist_field))
4166 			goto free;
4167 	}
4168  out:
4169 	return hist_field;
4170  free:
4171 	destroy_field_var(field_var);
4172 	hist_field = NULL;
4173 	goto out;
4174 }
4175 
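/*
 * Set up a trace(synth_event, param...) action: look up the synthetic
 * event and take a reference on it, create a variable reference for
 * each param, and verify that the params match the synthetic event's
 * fields in both type and count.
 */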
4176 static int trace_action_create(struct hist_trigger_data *hist_data,
4177 			       struct action_data *data)
4178 {
4179 	struct trace_array *tr = hist_data->event_file->tr;
4180 	char *event_name, *param, *system = NULL;
4181 	struct hist_field *hist_field, *var_ref;
4182 	unsigned int i, var_ref_idx;
4183 	unsigned int field_pos = 0;
4184 	struct synth_event *event;
4185 	char *synth_event_name;
4186 	int ret = 0;
4187 
4188 	lockdep_assert_held(&event_mutex);
4189 
4190 	if (data->use_trace_keyword)
4191 		synth_event_name = data->synth_event_name;
4192 	else
4193 		synth_event_name = data->action_name;
4194 
4195 	event = find_synth_event(synth_event_name);
4196 	if (!event) {
4197 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4198 		return -EINVAL;
4199 	}
4200 
4201 	event->ref++;
4202 
4203 	var_ref_idx = hist_data->n_var_refs;
4204 
4205 	for (i = 0; i < data->n_params; i++) {
4206 		char *p;
4207 
4208 		p = param = kstrdup(data->params[i], GFP_KERNEL);
4209 		if (!param) {
4210 			ret = -ENOMEM;
4211 			goto err;
4212 		}
4213 
4214 		system = strsep(&param, ".");
4215 		if (!param) {
4216 			param = (char *)system;
4217 			system = event_name = NULL;
4218 		} else {
4219 			event_name = strsep(&param, ".");
4220 			if (!param) {
4221 				kfree(p);
4222 				ret = -EINVAL;
4223 				goto err;
4224 			}
4225 		}
4226 
4227 		if (param[0] == '$')
4228 			hist_field = trace_action_find_var(hist_data, data,
4229 							   system, event_name,
4230 							   param);
4231 		else
4232 			hist_field = trace_action_create_field_var(hist_data,
4233 								   data,
4234 								   system,
4235 								   event_name,
4236 								   param);
4237 
4238 		if (!hist_field) {
4239 			kfree(p);
4240 			ret = -EINVAL;
4241 			goto err;
4242 		}
4243 
4244 		if (check_synth_field(event, hist_field, field_pos) == 0) {
4245 			var_ref = create_var_ref(hist_data, hist_field,
4246 						 system, event_name);
4247 			if (!var_ref) {
4248 				kfree(p);
4249 				ret = -ENOMEM;
4250 				goto err;
4251 			}
4252 
4253 			field_pos++;
4254 			kfree(p);
4255 			continue;
4256 		}
4257 
4258 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4259 		kfree(p);
4260 		ret = -EINVAL;
4261 		goto err;
4262 	}
4263 
4264 	if (field_pos != event->n_fields) {
4265 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4266 		ret = -EINVAL;
4267 		goto err;
4268 	}
4269 
4270 	data->synth_event = event;
4271 	data->var_ref_idx = var_ref_idx;
4272  out:
4273 	return ret;
4274  err:
4275 	event->ref--;
4276 
4277 	goto out;
4278 }
4279 
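/*
 * Instantiate a parsed action: trace() sets up synthetic event
 * references, snapshot() enables a conditional snapshot on the trace
 * instance, and save() creates a field variable for each field to be
 * saved.
 */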
4280 static int action_create(struct hist_trigger_data *hist_data,
4281 			 struct action_data *data)
4282 {
4283 	struct trace_event_file *file = hist_data->event_file;
4284 	struct trace_array *tr = file->tr;
4285 	struct track_data *track_data;
4286 	struct field_var *field_var;
4287 	unsigned int i;
4288 	char *param;
4289 	int ret = 0;
4290 
4291 	if (data->action == ACTION_TRACE)
4292 		return trace_action_create(hist_data, data);
4293 
4294 	if (data->action == ACTION_SNAPSHOT) {
4295 		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4296 		if (IS_ERR(track_data)) {
4297 			ret = PTR_ERR(track_data);
4298 			goto out;
4299 		}
4300 
4301 		ret = tracing_snapshot_cond_enable(file->tr, track_data,
4302 						   cond_snapshot_update);
4303 		if (ret)
4304 			track_data_free(track_data);
4305 
4306 		goto out;
4307 	}
4308 
4309 	if (data->action == ACTION_SAVE) {
4310 		if (hist_data->n_save_vars) {
4311 			ret = -EEXIST;
4312 			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4313 			goto out;
4314 		}
4315 
4316 		for (i = 0; i < data->n_params; i++) {
4317 			param = kstrdup(data->params[i], GFP_KERNEL);
4318 			if (!param) {
4319 				ret = -ENOMEM;
4320 				goto out;
4321 			}
4322 
4323 			field_var = create_target_field_var(hist_data, NULL, NULL, param);
4324 			if (IS_ERR(field_var)) {
4325 				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4326 					 errpos(param));
4327 				ret = PTR_ERR(field_var);
4328 				kfree(param);
4329 				goto out;
4330 			}
4331 
4332 			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4333 			if (field_var->val->flags & HIST_FIELD_FL_STRING)
4334 				hist_data->n_save_var_str++;
4335 			kfree(param);
4336 		}
4337 	}
4338  out:
4339 	return ret;
4340 }
4341 
4342 static int onmatch_create(struct hist_trigger_data *hist_data,
4343 			  struct action_data *data)
4344 {
4345 	return action_create(hist_data, data);
4346 }
4347 
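/*
 * Parse an onmatch(subsys.event).action(...) string: validate the
 * matched subsystem and event names, save them in match_data, then
 * hand the remainder to action_parse().
 */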
4348 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4349 {
4350 	char *match_event, *match_event_system;
4351 	struct action_data *data;
4352 	int ret = -EINVAL;
4353 
4354 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4355 	if (!data)
4356 		return ERR_PTR(-ENOMEM);
4357 
4358 	match_event = strsep(&str, ")");
4359 	if (!match_event || !str) {
4360 		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4361 		goto free;
4362 	}
4363 
4364 	match_event_system = strsep(&match_event, ".");
4365 	if (!match_event) {
4366 		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4367 		goto free;
4368 	}
4369 
4370 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4371 		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4372 		goto free;
4373 	}
4374 
4375 	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4376 	if (!data->match_data.event) {
4377 		ret = -ENOMEM;
4378 		goto free;
4379 	}
4380 
4381 	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4382 	if (!data->match_data.event_system) {
4383 		ret = -ENOMEM;
4384 		goto free;
4385 	}
4386 
4387 	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4388 	if (ret)
4389 		goto free;
4390  out:
4391 	return data;
4392  free:
4393 	onmatch_destroy(data);
4394 	data = ERR_PTR(ret);
4395 	goto out;
4396 }
4397 
4398 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4399 {
4400 	hist_data->fields[HITCOUNT_IDX] =
4401 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4402 	if (!hist_data->fields[HITCOUNT_IDX])
4403 		return -ENOMEM;
4404 
4405 	hist_data->n_vals++;
4406 	hist_data->n_fields++;
4407 
4408 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4409 		return -EINVAL;
4410 
4411 	return 0;
4412 }
4413 
4414 static int __create_val_field(struct hist_trigger_data *hist_data,
4415 			      unsigned int val_idx,
4416 			      struct trace_event_file *file,
4417 			      char *var_name, char *field_str,
4418 			      unsigned long flags)
4419 {
4420 	struct hist_field *hist_field;
4421 	int ret = 0;
4422 
4423 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4424 	if (IS_ERR(hist_field)) {
4425 		ret = PTR_ERR(hist_field);
4426 		goto out;
4427 	}
4428 
4429 	hist_data->fields[val_idx] = hist_field;
4430 
4431 	++hist_data->n_vals;
4432 	++hist_data->n_fields;
4433 
4434 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4435 		ret = -EINVAL;
4436  out:
4437 	return ret;
4438 }
4439 
4440 static int create_val_field(struct hist_trigger_data *hist_data,
4441 			    unsigned int val_idx,
4442 			    struct trace_event_file *file,
4443 			    char *field_str)
4444 {
4445 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4446 		return -EINVAL;
4447 
4448 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4449 }
4450 
4451 static int create_var_field(struct hist_trigger_data *hist_data,
4452 			    unsigned int val_idx,
4453 			    struct trace_event_file *file,
4454 			    char *var_name, char *expr_str)
4455 {
4456 	struct trace_array *tr = hist_data->event_file->tr;
4457 	unsigned long flags = 0;
4458 
4459 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4460 		return -EINVAL;
4461 
4462 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4463 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4464 		return -EINVAL;
4465 	}
4466 
4467 	flags |= HIST_FIELD_FL_VAR;
4468 	hist_data->n_vars++;
4469 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4470 		return -EINVAL;
4471 
4472 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4473 }
4474 
4475 static int create_val_fields(struct hist_trigger_data *hist_data,
4476 			     struct trace_event_file *file)
4477 {
4478 	char *fields_str, *field_str;
4479 	unsigned int i, j = 1;
4480 	int ret;
4481 
4482 	ret = create_hitcount_val(hist_data);
4483 	if (ret)
4484 		goto out;
4485 
4486 	fields_str = hist_data->attrs->vals_str;
4487 	if (!fields_str)
4488 		goto out;
4489 
4490 	strsep(&fields_str, "=");
4491 	if (!fields_str)
4492 		goto out;
4493 
4494 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4495 		     j < TRACING_MAP_VALS_MAX; i++) {
4496 		field_str = strsep(&fields_str, ",");
4497 		if (!field_str)
4498 			break;
4499 
4500 		if (strcmp(field_str, "hitcount") == 0)
4501 			continue;
4502 
4503 		ret = create_val_field(hist_data, j++, file, field_str);
4504 		if (ret)
4505 			goto out;
4506 	}
4507 
4508 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4509 		ret = -EINVAL;
4510  out:
4511 	return ret;
4512 }
4513 
4514 static int create_key_field(struct hist_trigger_data *hist_data,
4515 			    unsigned int key_idx,
4516 			    unsigned int key_offset,
4517 			    struct trace_event_file *file,
4518 			    char *field_str)
4519 {
4520 	struct trace_array *tr = hist_data->event_file->tr;
4521 	struct hist_field *hist_field = NULL;
4522 	unsigned long flags = 0;
4523 	unsigned int key_size;
4524 	int ret = 0;
4525 
4526 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4527 		return -EINVAL;
4528 
4529 	flags |= HIST_FIELD_FL_KEY;
4530 
4531 	if (strcmp(field_str, "stacktrace") == 0) {
4532 		flags |= HIST_FIELD_FL_STACKTRACE;
4533 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4534 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4535 	} else {
4536 		hist_field = parse_expr(hist_data, file, field_str, flags,
4537 					NULL, 0);
4538 		if (IS_ERR(hist_field)) {
4539 			ret = PTR_ERR(hist_field);
4540 			goto out;
4541 		}
4542 
4543 		if (field_has_hist_vars(hist_field, 0))	{
4544 			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4545 			destroy_hist_field(hist_field, 0);
4546 			ret = -EINVAL;
4547 			goto out;
4548 		}
4549 
4550 		key_size = hist_field->size;
4551 	}
4552 
4553 	hist_data->fields[key_idx] = hist_field;
4554 
4555 	key_size = ALIGN(key_size, sizeof(u64));
4556 	hist_data->fields[key_idx]->size = key_size;
4557 	hist_data->fields[key_idx]->offset = key_offset;
4558 
4559 	hist_data->key_size += key_size;
4560 
4561 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4562 		ret = -EINVAL;
4563 		goto out;
4564 	}
4565 
4566 	hist_data->n_keys++;
4567 	hist_data->n_fields++;
4568 
4569 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4570 		return -EINVAL;
4571 
4572 	ret = key_size;
4573  out:
4574 	return ret;
4575 }
4576 
4577 static int create_key_fields(struct hist_trigger_data *hist_data,
4578 			     struct trace_event_file *file)
4579 {
4580 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4581 	char *fields_str, *field_str;
4582 	int ret = -EINVAL;
4583 
4584 	fields_str = hist_data->attrs->keys_str;
4585 	if (!fields_str)
4586 		goto out;
4587 
4588 	strsep(&fields_str, "=");
4589 	if (!fields_str)
4590 		goto out;
4591 
4592 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4593 		field_str = strsep(&fields_str, ",");
4594 		if (!field_str)
4595 			break;
4596 		ret = create_key_field(hist_data, i, key_offset,
4597 				       file, field_str);
4598 		if (ret < 0)
4599 			goto out;
4600 		key_offset += ret;
4601 	}
4602 	if (fields_str) {
4603 		ret = -EINVAL;
4604 		goto out;
4605 	}
4606 	ret = 0;
4607  out:
4608 	return ret;
4609 }
4610 
4611 static int create_var_fields(struct hist_trigger_data *hist_data,
4612 			     struct trace_event_file *file)
4613 {
4614 	unsigned int i, j = hist_data->n_vals;
4615 	int ret = 0;
4616 
4617 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4618 
4619 	for (i = 0; i < n_vars; i++) {
4620 		char *var_name = hist_data->attrs->var_defs.name[i];
4621 		char *expr = hist_data->attrs->var_defs.expr[i];
4622 
4623 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4624 		if (ret)
4625 			goto out;
4626 	}
4627  out:
4628 	return ret;
4629 }
4630 
4631 static void free_var_defs(struct hist_trigger_data *hist_data)
4632 {
4633 	unsigned int i;
4634 
4635 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4636 		kfree(hist_data->attrs->var_defs.name[i]);
4637 		kfree(hist_data->attrs->var_defs.expr[i]);
4638 	}
4639 
4640 	hist_data->attrs->var_defs.n_vars = 0;
4641 }
4642 
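/*
 * Split the 'var=expr' assignment strings into name/expression pairs
 * and save copies in hist_data->attrs->var_defs, for later creation of
 * the variables by create_var_fields().
 */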
4643 static int parse_var_defs(struct hist_trigger_data *hist_data)
4644 {
4645 	struct trace_array *tr = hist_data->event_file->tr;
4646 	char *s, *str, *var_name, *field_str;
4647 	unsigned int i, j, n_vars = 0;
4648 	int ret = 0;
4649 
4650 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4651 		str = hist_data->attrs->assignment_str[i];
4652 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4653 			field_str = strsep(&str, ",");
4654 			if (!field_str)
4655 				break;
4656 
4657 			var_name = strsep(&field_str, "=");
4658 			if (!var_name || !field_str) {
4659 				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4660 					 errpos(var_name));
4661 				ret = -EINVAL;
4662 				goto free;
4663 			}
4664 
4665 			if (n_vars == TRACING_MAP_VARS_MAX) {
4666 				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4667 				ret = -EINVAL;
4668 				goto free;
4669 			}
4670 
4671 			s = kstrdup(var_name, GFP_KERNEL);
4672 			if (!s) {
4673 				ret = -ENOMEM;
4674 				goto free;
4675 			}
4676 			hist_data->attrs->var_defs.name[n_vars] = s;
4677 
4678 			s = kstrdup(field_str, GFP_KERNEL);
4679 			if (!s) {
4680 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4681 				ret = -ENOMEM;
4682 				goto free;
4683 			}
4684 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4685 
4686 			hist_data->attrs->var_defs.n_vars = n_vars;
4687 		}
4688 	}
4689 
4690 	return ret;
4691  free:
4692 	free_var_defs(hist_data);
4693 
4694 	return ret;
4695 }
4696 
4697 static int create_hist_fields(struct hist_trigger_data *hist_data,
4698 			      struct trace_event_file *file)
4699 {
4700 	int ret;
4701 
4702 	ret = parse_var_defs(hist_data);
4703 	if (ret)
4704 		goto out;
4705 
4706 	ret = create_val_fields(hist_data, file);
4707 	if (ret)
4708 		goto out;
4709 
4710 	ret = create_var_fields(hist_data, file);
4711 	if (ret)
4712 		goto out;
4713 
4714 	ret = create_key_fields(hist_data, file);
4715 	if (ret)
4716 		goto out;
4717  out:
4718 	free_var_defs(hist_data);
4719 
4720 	return ret;
4721 }
4722 
4723 static int is_descending(const char *str)
4724 {
4725 	if (!str)
4726 		return 0;
4727 
4728 	if (strcmp(str, "descending") == 0)
4729 		return 1;
4730 
4731 	if (strcmp(str, "ascending") == 0)
4732 		return 0;
4733 
4734 	return -EINVAL;
4735 }
4736 
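/*
 * Translate the 'sort=' attribute into tracing_map sort keys.  Each
 * key names either 'hitcount' or a val field, optionally followed by
 * '.ascending' or '.descending'; variables can't be used as sort keys.
 */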
4737 static int create_sort_keys(struct hist_trigger_data *hist_data)
4738 {
4739 	char *fields_str = hist_data->attrs->sort_key_str;
4740 	struct tracing_map_sort_key *sort_key;
4741 	int descending, ret = 0;
4742 	unsigned int i, j, k;
4743 
4744 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4745 
4746 	if (!fields_str)
4747 		goto out;
4748 
4749 	strsep(&fields_str, "=");
4750 	if (!fields_str) {
4751 		ret = -EINVAL;
4752 		goto out;
4753 	}
4754 
4755 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4756 		struct hist_field *hist_field;
4757 		char *field_str, *field_name;
4758 		const char *test_name;
4759 
4760 		sort_key = &hist_data->sort_keys[i];
4761 
4762 		field_str = strsep(&fields_str, ",");
4763 		if (!field_str) {
4764 			if (i == 0)
4765 				ret = -EINVAL;
4766 			break;
4767 		}
4768 
4769 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4770 			ret = -EINVAL;
4771 			break;
4772 		}
4773 
4774 		field_name = strsep(&field_str, ".");
4775 		if (!field_name) {
4776 			ret = -EINVAL;
4777 			break;
4778 		}
4779 
4780 		if (strcmp(field_name, "hitcount") == 0) {
4781 			descending = is_descending(field_str);
4782 			if (descending < 0) {
4783 				ret = descending;
4784 				break;
4785 			}
4786 			sort_key->descending = descending;
4787 			continue;
4788 		}
4789 
4790 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4791 			unsigned int idx;
4792 
4793 			hist_field = hist_data->fields[j];
4794 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4795 				continue;
4796 
4797 			idx = k++;
4798 
4799 			test_name = hist_field_name(hist_field, 0);
4800 
4801 			if (strcmp(field_name, test_name) == 0) {
4802 				sort_key->field_idx = idx;
4803 				descending = is_descending(field_str);
4804 				if (descending < 0) {
4805 					ret = descending;
4806 					goto out;
4807 				}
4808 				sort_key->descending = descending;
4809 				break;
4810 			}
4811 		}
4812 		if (j == hist_data->n_fields) {
4813 			ret = -EINVAL;
4814 			break;
4815 		}
4816 	}
4817 
4818 	hist_data->n_sort_keys = i;
4819  out:
4820 	return ret;
4821 }
4822 
4823 static void destroy_actions(struct hist_trigger_data *hist_data)
4824 {
4825 	unsigned int i;
4826 
4827 	for (i = 0; i < hist_data->n_actions; i++) {
4828 		struct action_data *data = hist_data->actions[i];
4829 
4830 		if (data->handler == HANDLER_ONMATCH)
4831 			onmatch_destroy(data);
4832 		else if (data->handler == HANDLER_ONMAX ||
4833 			 data->handler == HANDLER_ONCHANGE)
4834 			track_data_destroy(hist_data, data);
4835 		else
4836 			kfree(data);
4837 	}
4838 }
4839 
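/*
 * Parse each action string attached to the trigger (onmatch(),
 * onmax(), onchange()) into an action_data and save it in
 * hist_data->actions[].  The parsed actions are instantiated
 * separately by create_actions().
 */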
4840 static int parse_actions(struct hist_trigger_data *hist_data)
4841 {
4842 	struct trace_array *tr = hist_data->event_file->tr;
4843 	struct action_data *data;
4844 	unsigned int i;
4845 	int ret = 0;
4846 	char *str;
4847 	int len;
4848 
4849 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4850 		str = hist_data->attrs->action_str[i];
4851 
4852 		if ((len = str_has_prefix(str, "onmatch("))) {
4853 			char *action_str = str + len;
4854 
4855 			data = onmatch_parse(tr, action_str);
4856 			if (IS_ERR(data)) {
4857 				ret = PTR_ERR(data);
4858 				break;
4859 			}
4860 		} else if ((len = str_has_prefix(str, "onmax("))) {
4861 			char *action_str = str + len;
4862 
4863 			data = track_data_parse(hist_data, action_str,
4864 						HANDLER_ONMAX);
4865 			if (IS_ERR(data)) {
4866 				ret = PTR_ERR(data);
4867 				break;
4868 			}
4869 		} else if ((len = str_has_prefix(str, "onchange("))) {
4870 			char *action_str = str + len;
4871 
4872 			data = track_data_parse(hist_data, action_str,
4873 						HANDLER_ONCHANGE);
4874 			if (IS_ERR(data)) {
4875 				ret = PTR_ERR(data);
4876 				break;
4877 			}
4878 		} else {
4879 			ret = -EINVAL;
4880 			break;
4881 		}
4882 
4883 		hist_data->actions[hist_data->n_actions++] = data;
4884 	}
4885 
4886 	return ret;
4887 }
4888 
4889 static int create_actions(struct hist_trigger_data *hist_data)
4890 {
4891 	struct action_data *data;
4892 	unsigned int i;
4893 	int ret = 0;
4894 
4895 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4896 		data = hist_data->actions[i];
4897 
4898 		if (data->handler == HANDLER_ONMATCH) {
4899 			ret = onmatch_create(hist_data, data);
4900 			if (ret)
4901 				break;
4902 		} else if (data->handler == HANDLER_ONMAX ||
4903 			   data->handler == HANDLER_ONCHANGE) {
4904 			ret = track_data_create(hist_data, data);
4905 			if (ret)
4906 				break;
4907 		} else {
4908 			ret = -EINVAL;
4909 			break;
4910 		}
4911 	}
4912 
4913 	return ret;
4914 }
4915 
4916 static void print_actions(struct seq_file *m,
4917 			  struct hist_trigger_data *hist_data,
4918 			  struct tracing_map_elt *elt)
4919 {
4920 	unsigned int i;
4921 
4922 	for (i = 0; i < hist_data->n_actions; i++) {
4923 		struct action_data *data = hist_data->actions[i];
4924 
4925 		if (data->action == ACTION_SNAPSHOT)
4926 			continue;
4927 
4928 		if (data->handler == HANDLER_ONMAX ||
4929 		    data->handler == HANDLER_ONCHANGE)
4930 			track_data_print(m, hist_data, elt, data);
4931 	}
4932 }
4933 
4934 static void print_action_spec(struct seq_file *m,
4935 			      struct hist_trigger_data *hist_data,
4936 			      struct action_data *data)
4937 {
4938 	unsigned int i;
4939 
4940 	if (data->action == ACTION_SAVE) {
4941 		for (i = 0; i < hist_data->n_save_vars; i++) {
4942 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4943 			if (i < hist_data->n_save_vars - 1)
4944 				seq_puts(m, ",");
4945 		}
4946 	} else if (data->action == ACTION_TRACE) {
4947 		if (data->use_trace_keyword)
4948 			seq_printf(m, "%s", data->synth_event_name);
4949 		for (i = 0; i < data->n_params; i++) {
4950 			if (i || data->use_trace_keyword)
4951 				seq_puts(m, ",");
4952 			seq_printf(m, "%s", data->params[i]);
4953 		}
4954 	}
4955 }
4956 
4957 static void print_track_data_spec(struct seq_file *m,
4958 				  struct hist_trigger_data *hist_data,
4959 				  struct action_data *data)
4960 {
4961 	if (data->handler == HANDLER_ONMAX)
4962 		seq_puts(m, ":onmax(");
4963 	else if (data->handler == HANDLER_ONCHANGE)
4964 		seq_puts(m, ":onchange(");
4965 	seq_printf(m, "%s", data->track_data.var_str);
4966 	seq_printf(m, ").%s(", data->action_name);
4967 
4968 	print_action_spec(m, hist_data, data);
4969 
4970 	seq_puts(m, ")");
4971 }
4972 
4973 static void print_onmatch_spec(struct seq_file *m,
4974 			       struct hist_trigger_data *hist_data,
4975 			       struct action_data *data)
4976 {
4977 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4978 		   data->match_data.event);
4979 
4980 	seq_printf(m, "%s(", data->action_name);
4981 
4982 	print_action_spec(m, hist_data, data);
4983 
4984 	seq_puts(m, ")");
4985 }
4986 
4987 static bool actions_match(struct hist_trigger_data *hist_data,
4988 			  struct hist_trigger_data *hist_data_test)
4989 {
4990 	unsigned int i, j;
4991 
4992 	if (hist_data->n_actions != hist_data_test->n_actions)
4993 		return false;
4994 
4995 	for (i = 0; i < hist_data->n_actions; i++) {
4996 		struct action_data *data = hist_data->actions[i];
4997 		struct action_data *data_test = hist_data_test->actions[i];
4998 		char *action_name, *action_name_test;
4999 
5000 		if (data->handler != data_test->handler)
5001 			return false;
5002 		if (data->action != data_test->action)
5003 			return false;
5004 
5005 		if (data->n_params != data_test->n_params)
5006 			return false;
5007 
5008 		for (j = 0; j < data->n_params; j++) {
5009 			if (strcmp(data->params[j], data_test->params[j]) != 0)
5010 				return false;
5011 		}
5012 
5013 		if (data->use_trace_keyword)
5014 			action_name = data->synth_event_name;
5015 		else
5016 			action_name = data->action_name;
5017 
5018 		if (data_test->use_trace_keyword)
5019 			action_name_test = data_test->synth_event_name;
5020 		else
5021 			action_name_test = data_test->action_name;
5022 
5023 		if (strcmp(action_name, action_name_test) != 0)
5024 			return false;
5025 
5026 		if (data->handler == HANDLER_ONMATCH) {
5027 			if (strcmp(data->match_data.event_system,
5028 				   data_test->match_data.event_system) != 0)
5029 				return false;
5030 			if (strcmp(data->match_data.event,
5031 				   data_test->match_data.event) != 0)
5032 				return false;
5033 		} else if (data->handler == HANDLER_ONMAX ||
5034 			   data->handler == HANDLER_ONCHANGE) {
5035 			if (strcmp(data->track_data.var_str,
5036 				   data_test->track_data.var_str) != 0)
5037 				return false;
5038 		}
5039 	}
5040 
5041 	return true;
5042 }
5043 
5045 static void print_actions_spec(struct seq_file *m,
5046 			       struct hist_trigger_data *hist_data)
5047 {
5048 	unsigned int i;
5049 
5050 	for (i = 0; i < hist_data->n_actions; i++) {
5051 		struct action_data *data = hist_data->actions[i];
5052 
5053 		if (data->handler == HANDLER_ONMATCH)
5054 			print_onmatch_spec(m, hist_data, data);
5055 		else if (data->handler == HANDLER_ONMAX ||
5056 			 data->handler == HANDLER_ONCHANGE)
5057 			print_track_data_spec(m, hist_data, data);
5058 	}
5059 }
5060 
5061 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5062 {
5063 	unsigned int i;
5064 
5065 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5066 		kfree(hist_data->field_var_hists[i]->cmd);
5067 		kfree(hist_data->field_var_hists[i]);
5068 	}
5069 }
5070 
5071 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5072 {
5073 	if (!hist_data)
5074 		return;
5075 
5076 	destroy_hist_trigger_attrs(hist_data->attrs);
5077 	destroy_hist_fields(hist_data);
5078 	tracing_map_destroy(hist_data->map);
5079 
5080 	destroy_actions(hist_data);
5081 	destroy_field_vars(hist_data);
5082 	destroy_field_var_hists(hist_data);
5083 
5084 	kfree(hist_data);
5085 }
5086 
5087 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5088 {
5089 	struct tracing_map *map = hist_data->map;
5090 	struct ftrace_event_field *field;
5091 	struct hist_field *hist_field;
5092 	int i, idx = 0;
5093 
5094 	for_each_hist_field(i, hist_data) {
5095 		hist_field = hist_data->fields[i];
5096 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
5097 			tracing_map_cmp_fn_t cmp_fn;
5098 
5099 			field = hist_field->field;
5100 
5101 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5102 				cmp_fn = tracing_map_cmp_none;
5103 			else if (!field)
5104 				cmp_fn = tracing_map_cmp_num(hist_field->size,
5105 							     hist_field->is_signed);
5106 			else if (is_string_field(field))
5107 				cmp_fn = tracing_map_cmp_string;
5108 			else
5109 				cmp_fn = tracing_map_cmp_num(field->size,
5110 							     field->is_signed);
5111 			idx = tracing_map_add_key_field(map,
5112 							hist_field->offset,
5113 							cmp_fn);
5114 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5115 			idx = tracing_map_add_sum_field(map);
5116 
5117 		if (idx < 0)
5118 			return idx;
5119 
5120 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5121 			idx = tracing_map_add_var(map);
5122 			if (idx < 0)
5123 				return idx;
5124 			hist_field->var.idx = idx;
5125 			hist_field->var.hist_data = hist_data;
5126 		}
5127 	}
5128 
5129 	return 0;
5130 }
5131 
5132 static struct hist_trigger_data *
5133 create_hist_data(unsigned int map_bits,
5134 		 struct hist_trigger_attrs *attrs,
5135 		 struct trace_event_file *file,
5136 		 bool remove)
5137 {
5138 	const struct tracing_map_ops *map_ops = NULL;
5139 	struct hist_trigger_data *hist_data;
5140 	int ret = 0;
5141 
5142 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5143 	if (!hist_data)
5144 		return ERR_PTR(-ENOMEM);
5145 
5146 	hist_data->attrs = attrs;
5147 	hist_data->remove = remove;
5148 	hist_data->event_file = file;
5149 
5150 	ret = parse_actions(hist_data);
5151 	if (ret)
5152 		goto free;
5153 
5154 	ret = create_hist_fields(hist_data, file);
5155 	if (ret)
5156 		goto free;
5157 
5158 	ret = create_sort_keys(hist_data);
5159 	if (ret)
5160 		goto free;
5161 
5162 	map_ops = &hist_trigger_elt_data_ops;
5163 
5164 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5165 					    map_ops, hist_data);
5166 	if (IS_ERR(hist_data->map)) {
5167 		ret = PTR_ERR(hist_data->map);
5168 		hist_data->map = NULL;
5169 		goto free;
5170 	}
5171 
5172 	ret = create_tracing_map_fields(hist_data);
5173 	if (ret)
5174 		goto free;
5175  out:
5176 	return hist_data;
5177  free:
5178 	hist_data->attrs = NULL;
5179 
5180 	destroy_hist_data(hist_data);
5181 
5182 	hist_data = ERR_PTR(ret);
5183 
5184 	goto out;
5185 }
5186 
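/*
 * Update the sums and variables of a hist map element for the current
 * event record, then update any field variables attached to the
 * element.
 */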
5187 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5188 				    struct tracing_map_elt *elt, void *rec,
5189 				    struct ring_buffer_event *rbe,
5190 				    u64 *var_ref_vals)
5191 {
5192 	struct hist_elt_data *elt_data;
5193 	struct hist_field *hist_field;
5194 	unsigned int i, var_idx;
5195 	u64 hist_val;
5196 
5197 	elt_data = elt->private_data;
5198 	elt_data->var_ref_vals = var_ref_vals;
5199 
5200 	for_each_hist_val_field(i, hist_data) {
5201 		hist_field = hist_data->fields[i];
5202 		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5203 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5204 			var_idx = hist_field->var.idx;
5205 			tracing_map_set_var(elt, var_idx, hist_val);
5206 			continue;
5207 		}
5208 		tracing_map_update_sum(elt, i, hist_val);
5209 	}
5210 
5211 	for_each_hist_key_field(i, hist_data) {
5212 		hist_field = hist_data->fields[i];
5213 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5214 			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5215 			var_idx = hist_field->var.idx;
5216 			tracing_map_set_var(elt, var_idx, hist_val);
5217 		}
5218 	}
5219 
5220 	update_field_vars(hist_data, elt, rbe, rec);
5221 }
5222 
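/*
 * Copy one key field's value into the compound key buffer at its
 * assigned offset, truncating string keys to the field size and
 * keeping them NULL-terminated.
 */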
5223 static inline void add_to_key(char *compound_key, void *key,
5224 			      struct hist_field *key_field, void *rec)
5225 {
5226 	size_t size = key_field->size;
5227 
5228 	if (key_field->flags & HIST_FIELD_FL_STRING) {
5229 		struct ftrace_event_field *field;
5230 
5231 		field = key_field->field;
5232 		if (field->filter_type == FILTER_DYN_STRING)
5233 			size = *(u32 *)(rec + field->offset) >> 16;
5234 		else if (field->filter_type == FILTER_PTR_STRING)
5235 			size = strlen(key);
5236 		else if (field->filter_type == FILTER_STATIC_STRING)
5237 			size = field->size;
5238 
5239 		/* ensure NULL-termination */
5240 		if (size > key_field->size - 1)
5241 			size = key_field->size - 1;
5242 
5243 		strncpy(compound_key + key_field->offset, (char *)key, size);
5244 	} else
5245 		memcpy(compound_key + key_field->offset, key, size);
5246 }
5247 
5248 static void
5249 hist_trigger_actions(struct hist_trigger_data *hist_data,
5250 		     struct tracing_map_elt *elt, void *rec,
5251 		     struct ring_buffer_event *rbe, void *key,
5252 		     u64 *var_ref_vals)
5253 {
5254 	struct action_data *data;
5255 	unsigned int i;
5256 
5257 	for (i = 0; i < hist_data->n_actions; i++) {
5258 		data = hist_data->actions[i];
5259 		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5260 	}
5261 }
5262 
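/*
 * The per-event hit function: build the (possibly compound) key from
 * the key fields, resolve any variable references, insert or look up
 * the map element, update its values, and run any attached actions.
 */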
5263 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5264 			       struct ring_buffer_event *rbe)
5265 {
5266 	struct hist_trigger_data *hist_data = data->private_data;
5267 	bool use_compound_key = (hist_data->n_keys > 1);
5268 	unsigned long entries[HIST_STACKTRACE_DEPTH];
5269 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5270 	char compound_key[HIST_KEY_SIZE_MAX];
5271 	struct tracing_map_elt *elt = NULL;
5272 	struct hist_field *key_field;
5273 	u64 field_contents;
5274 	void *key = NULL;
5275 	unsigned int i;
5276 
5277 	memset(compound_key, 0, hist_data->key_size);
5278 
5279 	for_each_hist_key_field(i, hist_data) {
5280 		key_field = hist_data->fields[i];
5281 
5282 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5283 			memset(entries, 0, HIST_STACKTRACE_SIZE);
5284 			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5285 					 HIST_STACKTRACE_SKIP);
5286 			key = entries;
5287 		} else {
5288 			field_contents = key_field->fn(key_field, elt, rbe, rec);
5289 			if (key_field->flags & HIST_FIELD_FL_STRING) {
5290 				key = (void *)(unsigned long)field_contents;
5291 				use_compound_key = true;
5292 			} else
5293 				key = (void *)&field_contents;
5294 		}
5295 
5296 		if (use_compound_key)
5297 			add_to_key(compound_key, key, key_field, rec);
5298 	}
5299 
5300 	if (use_compound_key)
5301 		key = compound_key;
5302 
5303 	if (hist_data->n_var_refs &&
5304 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
5305 		return;
5306 
5307 	elt = tracing_map_insert(hist_data->map, key);
5308 	if (!elt)
5309 		return;
5310 
5311 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5312 
5313 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5314 		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5315 }
5316 
5317 static void hist_trigger_stacktrace_print(struct seq_file *m,
5318 					  unsigned long *stacktrace_entries,
5319 					  unsigned int max_entries)
5320 {
5321 	char str[KSYM_SYMBOL_LEN];
5322 	unsigned int spaces = 8;
5323 	unsigned int i;
5324 
5325 	for (i = 0; i < max_entries; i++) {
5326 		if (!stacktrace_entries[i])
5327 			return;
5328 
5329 		seq_printf(m, "%*c", 1 + spaces, ' ');
5330 		sprint_symbol(str, stacktrace_entries[i]);
5331 		seq_printf(m, "%s\n", str);
5332 	}
5333 }
5334 
5335 static void hist_trigger_print_key(struct seq_file *m,
5336 				   struct hist_trigger_data *hist_data,
5337 				   void *key,
5338 				   struct tracing_map_elt *elt)
5339 {
5340 	struct hist_field *key_field;
5341 	char str[KSYM_SYMBOL_LEN];
5342 	bool multiline = false;
5343 	const char *field_name;
5344 	unsigned int i;
5345 	u64 uval;
5346 
5347 	seq_puts(m, "{ ");
5348 
5349 	for_each_hist_key_field(i, hist_data) {
5350 		key_field = hist_data->fields[i];
5351 
5352 		if (i > hist_data->n_vals)
5353 			seq_puts(m, ", ");
5354 
5355 		field_name = hist_field_name(key_field, 0);
5356 
5357 		if (key_field->flags & HIST_FIELD_FL_HEX) {
5358 			uval = *(u64 *)(key + key_field->offset);
5359 			seq_printf(m, "%s: %llx", field_name, uval);
5360 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
5361 			uval = *(u64 *)(key + key_field->offset);
5362 			sprint_symbol_no_offset(str, uval);
5363 			seq_printf(m, "%s: [%llx] %-45s", field_name,
5364 				   uval, str);
5365 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5366 			uval = *(u64 *)(key + key_field->offset);
5367 			sprint_symbol(str, uval);
5368 			seq_printf(m, "%s: [%llx] %-55s", field_name,
5369 				   uval, str);
5370 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5371 			struct hist_elt_data *elt_data = elt->private_data;
5372 			char *comm;
5373 
5374 			if (WARN_ON_ONCE(!elt_data))
5375 				return;
5376 
5377 			comm = elt_data->comm;
5378 
5379 			uval = *(u64 *)(key + key_field->offset);
5380 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
5381 				   comm, uval);
5382 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5383 			const char *syscall_name;
5384 
5385 			uval = *(u64 *)(key + key_field->offset);
5386 			syscall_name = get_syscall_name(uval);
5387 			if (!syscall_name)
5388 				syscall_name = "unknown_syscall";
5389 
5390 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
5391 				   syscall_name, uval);
5392 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5393 			seq_puts(m, "stacktrace:\n");
5394 			hist_trigger_stacktrace_print(m,
5395 						      key + key_field->offset,
5396 						      HIST_STACKTRACE_DEPTH);
5397 			multiline = true;
5398 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5399 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5400 				   *(u64 *)(key + key_field->offset));
5401 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
5402 			seq_printf(m, "%s: %-50s", field_name,
5403 				   (char *)(key + key_field->offset));
5404 		} else {
5405 			uval = *(u64 *)(key + key_field->offset);
5406 			seq_printf(m, "%s: %10llu", field_name, uval);
5407 		}
5408 	}
5409 
5410 	if (!multiline)
5411 		seq_puts(m, " ");
5412 
5413 	seq_puts(m, "}");
5414 }
5415 
5416 static void hist_trigger_entry_print(struct seq_file *m,
5417 				     struct hist_trigger_data *hist_data,
5418 				     void *key,
5419 				     struct tracing_map_elt *elt)
5420 {
5421 	const char *field_name;
5422 	unsigned int i;
5423 
5424 	hist_trigger_print_key(m, hist_data, key, elt);
5425 
5426 	seq_printf(m, " hitcount: %10llu",
5427 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
5428 
5429 	for (i = 1; i < hist_data->n_vals; i++) {
5430 		field_name = hist_field_name(hist_data->fields[i], 0);
5431 
5432 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5433 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5434 			continue;
5435 
5436 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5437 			seq_printf(m, "  %s: %10llx", field_name,
5438 				   tracing_map_read_sum(elt, i));
5439 		} else {
5440 			seq_printf(m, "  %s: %10llu", field_name,
5441 				   tracing_map_read_sum(elt, i));
5442 		}
5443 	}
5444 
5445 	print_actions(m, hist_data, elt);
5446 
5447 	seq_puts(m, "\n");
5448 }
5449 
5450 static int print_entries(struct seq_file *m,
5451 			 struct hist_trigger_data *hist_data)
5452 {
5453 	struct tracing_map_sort_entry **sort_entries = NULL;
5454 	struct tracing_map *map = hist_data->map;
5455 	int i, n_entries;
5456 
5457 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5458 					     hist_data->n_sort_keys,
5459 					     &sort_entries);
5460 	if (n_entries < 0)
5461 		return n_entries;
5462 
5463 	for (i = 0; i < n_entries; i++)
5464 		hist_trigger_entry_print(m, hist_data,
5465 					 sort_entries[i]->key,
5466 					 sort_entries[i]->elt);
5467 
5468 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
5469 
5470 	return n_entries;
5471 }
5472 
5473 static void hist_trigger_show(struct seq_file *m,
5474 			      struct event_trigger_data *data, int n)
5475 {
5476 	struct hist_trigger_data *hist_data;
5477 	int n_entries;
5478 
5479 	if (n > 0)
5480 		seq_puts(m, "\n\n");
5481 
5482 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5483 	data->ops->print(m, data->ops, data);
5484 	seq_puts(m, "#\n\n");
5485 
5486 	hist_data = data->private_data;
5487 	n_entries = print_entries(m, hist_data);
5488 	if (n_entries < 0)
5489 		n_entries = 0;
5490 
5491 	track_data_snapshot_print(m, hist_data);
5492 
5493 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
5494 		   (u64)atomic64_read(&hist_data->map->hits),
5495 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
5496 }
5497 
5498 static int hist_show(struct seq_file *m, void *v)
5499 {
5500 	struct event_trigger_data *data;
5501 	struct trace_event_file *event_file;
5502 	int n = 0, ret = 0;
5503 
5504 	mutex_lock(&event_mutex);
5505 
5506 	event_file = event_file_data(m->private);
5507 	if (unlikely(!event_file)) {
5508 		ret = -ENODEV;
5509 		goto out_unlock;
5510 	}
5511 
5512 	list_for_each_entry_rcu(data, &event_file->triggers, list) {
5513 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5514 			hist_trigger_show(m, data, n++);
5515 	}
5516 
5517  out_unlock:
5518 	mutex_unlock(&event_mutex);
5519 
5520 	return ret;
5521 }
5522 
5523 static int event_hist_open(struct inode *inode, struct file *file)
5524 {
5525 	int ret;
5526 
5527 	ret = security_locked_down(LOCKDOWN_TRACEFS);
5528 	if (ret)
5529 		return ret;
5530 
5531 	return single_open(file, hist_show, file);
5532 }
5533 
5534 const struct file_operations event_hist_fops = {
5535 	.open = event_hist_open,
5536 	.read = seq_read,
5537 	.llseek = seq_lseek,
5538 	.release = single_release,
5539 };
5540 
5541 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5542 {
5543 	const char *field_name = hist_field_name(hist_field, 0);
5544 
5545 	if (hist_field->var.name)
5546 		seq_printf(m, "%s=", hist_field->var.name);
5547 
5548 	if (hist_field->flags & HIST_FIELD_FL_CPU)
5549 		seq_puts(m, "cpu");
5550 	else if (field_name) {
5551 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5552 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
5553 			seq_putc(m, '$');
5554 		seq_printf(m, "%s", field_name);
5555 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5556 		seq_puts(m, "common_timestamp");
5557 
5558 	if (hist_field->flags) {
5559 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5560 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5561 			const char *flags = get_hist_field_flags(hist_field);
5562 
5563 			if (flags)
5564 				seq_printf(m, ".%s", flags);
5565 		}
5566 	}
5567 }
5568 
5569 static int event_hist_trigger_print(struct seq_file *m,
5570 				    struct event_trigger_ops *ops,
5571 				    struct event_trigger_data *data)
5572 {
5573 	struct hist_trigger_data *hist_data = data->private_data;
5574 	struct hist_field *field;
5575 	bool have_var = false;
5576 	unsigned int i;
5577 
5578 	seq_puts(m, "hist:");
5579 
5580 	if (data->name)
5581 		seq_printf(m, "%s:", data->name);
5582 
5583 	seq_puts(m, "keys=");
5584 
5585 	for_each_hist_key_field(i, hist_data) {
5586 		field = hist_data->fields[i];
5587 
5588 		if (i > hist_data->n_vals)
5589 			seq_puts(m, ",");
5590 
5591 		if (field->flags & HIST_FIELD_FL_STACKTRACE)
5592 			seq_puts(m, "stacktrace");
5593 		else
5594 			hist_field_print(m, field);
5595 	}
5596 
5597 	seq_puts(m, ":vals=");
5598 
5599 	for_each_hist_val_field(i, hist_data) {
5600 		field = hist_data->fields[i];
5601 		if (field->flags & HIST_FIELD_FL_VAR) {
5602 			have_var = true;
5603 			continue;
5604 		}
5605 
5606 		if (i == HITCOUNT_IDX)
5607 			seq_puts(m, "hitcount");
5608 		else {
5609 			seq_puts(m, ",");
5610 			hist_field_print(m, field);
5611 		}
5612 	}
5613 
5614 	if (have_var) {
5615 		unsigned int n = 0;
5616 
5617 		seq_puts(m, ":");
5618 
5619 		for_each_hist_val_field(i, hist_data) {
5620 			field = hist_data->fields[i];
5621 
5622 			if (field->flags & HIST_FIELD_FL_VAR) {
5623 				if (n++)
5624 					seq_puts(m, ",");
5625 				hist_field_print(m, field);
5626 			}
5627 		}
5628 	}
5629 
5630 	seq_puts(m, ":sort=");
5631 
5632 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5633 		struct tracing_map_sort_key *sort_key;
5634 		unsigned int idx, first_key_idx;
5635 
5636 		/* skip VAR vals */
5637 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
5638 
5639 		sort_key = &hist_data->sort_keys[i];
5640 		idx = sort_key->field_idx;
5641 
5642 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
5643 			return -EINVAL;
5644 
5645 		if (i > 0)
5646 			seq_puts(m, ",");
5647 
5648 		if (idx == HITCOUNT_IDX)
5649 			seq_puts(m, "hitcount");
5650 		else {
5651 			if (idx >= first_key_idx)
5652 				idx += hist_data->n_vars;
5653 			hist_field_print(m, hist_data->fields[idx]);
5654 		}
5655 
5656 		if (sort_key->descending)
5657 			seq_puts(m, ".descending");
5658 	}
5659 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5660 	if (hist_data->enable_timestamps)
5661 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5662 
5663 	print_actions_spec(m, hist_data);
5664 
5665 	if (data->filter_str)
5666 		seq_printf(m, " if %s", data->filter_str);
5667 
5668 	if (data->paused)
5669 		seq_puts(m, " [paused]");
5670 	else
5671 		seq_puts(m, " [active]");
5672 
5673 	seq_putc(m, '\n');
5674 
5675 	return 0;
5676 }
5677 
5678 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5679 				   struct event_trigger_data *data)
5680 {
5681 	struct hist_trigger_data *hist_data = data->private_data;
5682 
5683 	if (!data->ref && hist_data->attrs->name)
5684 		save_named_trigger(hist_data->attrs->name, data);
5685 
5686 	data->ref++;
5687 
5688 	return 0;
5689 }
5690 
5691 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5692 {
5693 	struct trace_event_file *file;
5694 	unsigned int i;
5695 	char *cmd;
5696 	int ret;
5697 
5698 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5699 		file = hist_data->field_var_hists[i]->hist_data->event_file;
5700 		cmd = hist_data->field_var_hists[i]->cmd;
5701 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5702 					      "!hist", "hist", cmd);
5703 	}
5704 }
5705 
5706 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5707 				    struct event_trigger_data *data)
5708 {
5709 	struct hist_trigger_data *hist_data = data->private_data;
5710 
5711 	if (WARN_ON_ONCE(data->ref <= 0))
5712 		return;
5713 
5714 	data->ref--;
5715 	if (!data->ref) {
5716 		if (data->name)
5717 			del_named_trigger(data);
5718 
5719 		trigger_data_free(data);
5720 
5721 		remove_hist_vars(hist_data);
5722 
5723 		unregister_field_var_hists(hist_data);
5724 
5725 		destroy_hist_data(hist_data);
5726 	}
5727 }
5728 
5729 static struct event_trigger_ops event_hist_trigger_ops = {
5730 	.func			= event_hist_trigger,
5731 	.print			= event_hist_trigger_print,
5732 	.init			= event_hist_trigger_init,
5733 	.free			= event_hist_trigger_free,
5734 };
5735 
5736 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5737 					 struct event_trigger_data *data)
5738 {
5739 	data->ref++;
5740 
5741 	save_named_trigger(data->named_data->name, data);
5742 
5743 	event_hist_trigger_init(ops, data->named_data);
5744 
5745 	return 0;
5746 }
5747 
5748 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5749 					  struct event_trigger_data *data)
5750 {
5751 	if (WARN_ON_ONCE(data->ref <= 0))
5752 		return;
5753 
5754 	event_hist_trigger_free(ops, data->named_data);
5755 
5756 	data->ref--;
5757 	if (!data->ref) {
5758 		del_named_trigger(data);
5759 		trigger_data_free(data);
5760 	}
5761 }
5762 
5763 static struct event_trigger_ops event_hist_trigger_named_ops = {
5764 	.func			= event_hist_trigger,
5765 	.print			= event_hist_trigger_print,
5766 	.init			= event_hist_trigger_named_init,
5767 	.free			= event_hist_trigger_named_free,
5768 };
5769 
5770 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5771 							    char *param)
5772 {
5773 	return &event_hist_trigger_ops;
5774 }
5775 
5776 static void hist_clear(struct event_trigger_data *data)
5777 {
5778 	struct hist_trigger_data *hist_data = data->private_data;
5779 
5780 	if (data->name)
5781 		pause_named_trigger(data);
5782 
5783 	tracepoint_synchronize_unregister();
5784 
5785 	tracing_map_clear(hist_data->map);
5786 
5787 	if (data->name)
5788 		unpause_named_trigger(data);
5789 }
5790 
5791 static bool compatible_field(struct ftrace_event_field *field,
5792 			     struct ftrace_event_field *test_field)
5793 {
5794 	if (field == test_field)
5795 		return true;
5796 	if (field == NULL || test_field == NULL)
5797 		return false;
5798 	if (strcmp(field->name, test_field->name) != 0)
5799 		return false;
5800 	if (strcmp(field->type, test_field->type) != 0)
5801 		return false;
5802 	if (field->size != test_field->size)
5803 		return false;
5804 	if (field->is_signed != test_field->is_signed)
5805 		return false;
5806 
5807 	return true;
5808 }
5809 
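/*
 * Return true if two hist triggers are equivalent: same fields, sort
 * keys, actions and (unless ignore_filter) filter.  Used to detect
 * duplicates and to attach to existing named triggers.
 */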
5810 static bool hist_trigger_match(struct event_trigger_data *data,
5811 			       struct event_trigger_data *data_test,
5812 			       struct event_trigger_data *named_data,
5813 			       bool ignore_filter)
5814 {
5815 	struct tracing_map_sort_key *sort_key, *sort_key_test;
5816 	struct hist_trigger_data *hist_data, *hist_data_test;
5817 	struct hist_field *key_field, *key_field_test;
5818 	unsigned int i;
5819 
5820 	if (named_data && (named_data != data_test) &&
5821 	    (named_data != data_test->named_data))
5822 		return false;
5823 
5824 	if (!named_data && is_named_trigger(data_test))
5825 		return false;
5826 
5827 	hist_data = data->private_data;
5828 	hist_data_test = data_test->private_data;
5829 
5830 	if (hist_data->n_vals != hist_data_test->n_vals ||
5831 	    hist_data->n_fields != hist_data_test->n_fields ||
5832 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5833 		return false;
5834 
5835 	if (!ignore_filter) {
5836 		if ((data->filter_str && !data_test->filter_str) ||
5837 		   (!data->filter_str && data_test->filter_str))
5838 			return false;
5839 	}
5840 
5841 	for_each_hist_field(i, hist_data) {
5842 		key_field = hist_data->fields[i];
5843 		key_field_test = hist_data_test->fields[i];
5844 
5845 		if (key_field->flags != key_field_test->flags)
5846 			return false;
5847 		if (!compatible_field(key_field->field, key_field_test->field))
5848 			return false;
5849 		if (key_field->offset != key_field_test->offset)
5850 			return false;
5851 		if (key_field->size != key_field_test->size)
5852 			return false;
5853 		if (key_field->is_signed != key_field_test->is_signed)
5854 			return false;
5855 		if (!!key_field->var.name != !!key_field_test->var.name)
5856 			return false;
5857 		if (key_field->var.name &&
5858 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
5859 			return false;
5860 	}
5861 
5862 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5863 		sort_key = &hist_data->sort_keys[i];
5864 		sort_key_test = &hist_data_test->sort_keys[i];
5865 
5866 		if (sort_key->field_idx != sort_key_test->field_idx ||
5867 		    sort_key->descending != sort_key_test->descending)
5868 			return false;
5869 	}
5870 
5871 	if (!ignore_filter && data->filter_str &&
5872 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
5873 		return false;
5874 
5875 	if (!actions_match(hist_data, hist_data_test))
5876 		return false;
5877 
5878 	return true;
5879 }
5880 
5881 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5882 				 struct event_trigger_data *data,
5883 				 struct trace_event_file *file)
5884 {
5885 	struct hist_trigger_data *hist_data = data->private_data;
5886 	struct event_trigger_data *test, *named_data = NULL;
5887 	struct trace_array *tr = file->tr;
5888 	int ret = 0;
5889 
5890 	if (hist_data->attrs->name) {
5891 		named_data = find_named_trigger(hist_data->attrs->name);
5892 		if (named_data) {
5893 			if (!hist_trigger_match(data, named_data, named_data,
5894 						true)) {
5895 				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5896 				ret = -EINVAL;
5897 				goto out;
5898 			}
5899 		}
5900 	}
5901 
5902 	if (hist_data->attrs->name && !named_data)
5903 		goto new;
5904 
5905 	list_for_each_entry_rcu(test, &file->triggers, list) {
5906 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5907 			if (!hist_trigger_match(data, test, named_data, false))
5908 				continue;
5909 			if (hist_data->attrs->pause)
5910 				test->paused = true;
5911 			else if (hist_data->attrs->cont)
5912 				test->paused = false;
5913 			else if (hist_data->attrs->clear)
5914 				hist_clear(test);
5915 			else {
5916 				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5917 				ret = -EEXIST;
5918 			}
5919 			goto out;
5920 		}
5921 	}
5922  new:
5923 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
5924 		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5925 		ret = -ENOENT;
5926 		goto out;
5927 	}
5928 
5929 	if (hist_data->attrs->pause)
5930 		data->paused = true;
5931 
5932 	if (named_data) {
5933 		data->private_data = named_data->private_data;
5934 		set_named_trigger_data(data, named_data);
5935 		data->ops = &event_hist_trigger_named_ops;
5936 	}
5937 
5938 	if (data->ops->init) {
5939 		ret = data->ops->init(data->ops, data);
5940 		if (ret < 0)
5941 			goto out;
5942 	}
5943 
	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_time_stamp_abs(file->tr, true);
	}

	if (named_data)
		destroy_hist_data(hist_data);

	ret++;
 out:
	return ret;
}

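/*
 * Add @data to @file's trigger list and enable the trigger for the
 * event.  On failure the trigger is unlinked again and a negative
 * value is returned.
 */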
static int hist_trigger_enable(struct event_trigger_data *data,
			       struct trace_event_file *file)
{
	int ret = 0;

	list_add_tail_rcu(&data->list, &file->triggers);

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}

	return ret;
}

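/*
 * Return true if a hist trigger matching @data is already registered
 * on @file.
 */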
static bool have_hist_trigger_match(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool match = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (hist_trigger_match(data, test, named_data, false)) {
				match = true;
				break;
			}
		}
	}

	return match;
}

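/*
 * Return true if the registered hist trigger matching @data defines
 * variables that are still referenced, i.e. it can't be removed yet.
 */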
static bool hist_trigger_check_refs(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
			break;
		}
	}

	return false;
}

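/*
 * Unlink the hist trigger matching @data from @file's trigger list,
 * disable the trigger for the event and free it, reverting the
 * absolute-timestamp setting if this trigger had enabled it.
 */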
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}

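/*
 * Return true if any hist trigger on @file defines variables that are
 * still referenced, in which case none of its triggers can be removed.
 */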
static bool hist_file_check_refs(struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
		}
	}

	return false;
}

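/*
 * Remove and free all hist triggers on @file, unless any of them has
 * variables that are still referenced.  If the file's event is a
 * synthetic event, the reference each removed trigger took on it is
 * dropped.
 */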
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

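/*
 * Parse and apply a 'hist' trigger command written to an event's
 * 'trigger' file.  The command is of the form '<trigger> [if <filter>]',
 * e.g. (illustrative only):
 *
 *   echo 'hist:keys=common_pid:vals=hitcount:sort=hitcount' > \
 *	events/<subsys>/<event>/trigger
 *
 * A leading '!' in @glob removes a matching trigger instead of adding
 * one.
 */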
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * Separate the trigger from the filter ("<trigger> [if <filter>]"),
	 * allowing for whitespace within the trigger itself: "if" only
	 * counts as the delimiter when it appears as a standalone word.
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if an 'if' clause was found, what remains in param is the filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * On success, the above returns the number of triggers it
	 * registered; zero means nothing was registered.  Unless this
	 * was a pause/cont/clear of an existing trigger, treat zero
	 * registrations as a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0)
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

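/*
 * Handler for the hist enable/disable triggers (e.g. a command of the
 * form '<enable-or-disable-cmd>:<system>:<event>[:count]' written to
 * some other event's trigger file): unpause or pause every hist
 * trigger on the target event file, depending on whether this is an
 * enable or a disable trigger.
 */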
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

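/*
 * Counted variant of hist_enable_trigger(): fires only while the
 * trigger's count is nonzero, decrementing it on each hit (a count of
 * -1 means no limit).
 */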
static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

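/*
 * Select the trigger ops for a hist enable/disable command: the
 * counted variants are used when a count parameter was supplied.
 */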
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

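/*
 * Remove and free all hist enable/disable triggers on @file.
 */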
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}

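/*
 * Register the synthetic-event dynamic event ops and create the
 * tracefs 'synthetic_events' control file.
 */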
static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err) {
		pr_warn("Could not register synth_event_ops\n");
		return err;
	}

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_hist_init);