1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24 
25 #define SYNTH_SYSTEM		"synthetic"
26 #define SYNTH_FIELDS_MAX	16
27 
28 #define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
29 
30 #define ERRORS								\
31 	C(NONE,			"No error"),				\
32 	C(DUPLICATE_VAR,	"Variable already defined"),		\
33 	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 	C(TOO_MANY_VARS,	"Too many variables defined"),		\
35 	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
36 	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
38 	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
39 	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
40 	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
41 	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
42 	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
43 	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
44 	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
45 	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
46 	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
47 	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
48 	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
49 	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
50 	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
51 	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
52 	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
53 	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
54 	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
55 	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
56 	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
57 	C(TOO_MANY_PARAMS,	"Too many action params"),		\
58 	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
59 	C(INVALID_PARAM,	"Invalid action param"),		\
60 	C(ACTION_NOT_FOUND,	"No action found"),			\
61 	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
62 	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
64 	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
65 	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
66 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
67 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
68 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
69 	C(FIELD_NOT_FOUND,	"Couldn't find field"),
70 
71 #undef C
72 #define C(a, b)		HIST_ERR_##a
73 
74 enum { ERRORS };
75 
76 #undef C
77 #define C(a, b)		b
78 
79 static const char *err_text[] = { ERRORS };
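
/*
 * A minimal sketch of the C() x-macro expansion above, using the first
 * two ERRORS entries.  The same list generates both the enum and the
 * parallel text table, so the two stay in sync by construction:
 *
 *   enum { HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ... };
 *   static const char *err_text[] = { "No error",
 *				       "Variable already defined", ... };
 *
 * An error is then reported by passing a HIST_ERR_* value to hist_err(),
 * which hands it along with err_text[] to tracing_log_err().
 */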
80 
81 struct hist_field;
82 
83 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
84 				struct tracing_map_elt *elt,
85 				struct ring_buffer_event *rbe,
86 				void *event);
87 
88 #define HIST_FIELD_OPERANDS_MAX	2
89 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
90 #define HIST_ACTIONS_MAX	8
91 
92 enum field_op_id {
93 	FIELD_OP_NONE,
94 	FIELD_OP_PLUS,
95 	FIELD_OP_MINUS,
96 	FIELD_OP_UNARY_MINUS,
97 };
98 
99 /*
100  * A hist_var (histogram variable) contains variable information for
101  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
102  * flag set.  A hist_var has a variable name e.g. ts0, and is
103  * associated with a given histogram trigger, as specified by
104  * hist_data.  The hist_var idx is the unique index assigned to the
105  * variable by the hist trigger's tracing_map.  The idx is what is
106  * used to set a variable's value and, by a variable reference, to
107  * retrieve it.
108  */
109 struct hist_var {
110 	char				*name;
111 	struct hist_trigger_data	*hist_data;
112 	unsigned int			idx;
113 };
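
/*
 * Illustrative example (the command follows the syntax documented in
 * Documentation/trace/histogram.rst): for a trigger such as
 *
 *   echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *	 events/sched/sched_waking/trigger
 *
 * the hist_field created for ts0 has var.name == "ts0", var.hist_data
 * pointing at that trigger's hist_trigger_data, and var.idx holding the
 * index its tracing_map assigned to ts0.
 */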
114 
115 struct hist_field {
116 	struct ftrace_event_field	*field;
117 	unsigned long			flags;
118 	hist_field_fn_t			fn;
119 	unsigned int			size;
120 	unsigned int			offset;
121 	unsigned int                    is_signed;
122 	const char			*type;
123 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
124 	struct hist_trigger_data	*hist_data;
125 
126 	/*
127 	 * Variable fields contain variable-specific info in var.
128 	 */
129 	struct hist_var			var;
130 	enum field_op_id		operator;
131 	char				*system;
132 	char				*event_name;
133 
134 	/*
135 	 * The name field is used for EXPR and VAR_REF fields.  VAR
136 	 * fields contain the variable name in var.name.
137 	 */
138 	char				*name;
139 
140 	/*
141 	 * When a histogram trigger is hit, if it has any references
142 	 * to variables, the values of those variables are collected
143 	 * into a var_ref_vals array by resolve_var_refs().  The
144 	 * current value of each variable is read from the tracing_map
145 	 * using the hist field's hist_var.idx and entered into the
146 	 * var_ref_idx entry, i.e. var_ref_vals[var_ref_idx].
147 	 */
148 	unsigned int			var_ref_idx;
149 	bool                            read_once;
150 };
151 
152 static u64 hist_field_none(struct hist_field *field,
153 			   struct tracing_map_elt *elt,
154 			   struct ring_buffer_event *rbe,
155 			   void *event)
156 {
157 	return 0;
158 }
159 
160 static u64 hist_field_counter(struct hist_field *field,
161 			      struct tracing_map_elt *elt,
162 			      struct ring_buffer_event *rbe,
163 			      void *event)
164 {
165 	return 1;
166 }
167 
168 static u64 hist_field_string(struct hist_field *hist_field,
169 			     struct tracing_map_elt *elt,
170 			     struct ring_buffer_event *rbe,
171 			     void *event)
172 {
173 	char *addr = (char *)(event + hist_field->field->offset);
174 
175 	return (u64)(unsigned long)addr;
176 }
177 
178 static u64 hist_field_dynstring(struct hist_field *hist_field,
179 				struct tracing_map_elt *elt,
180 				struct ring_buffer_event *rbe,
181 				void *event)
182 {
183 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
184 	int str_loc = str_item & 0xffff;
185 	char *addr = (char *)(event + str_loc);
186 
187 	return (u64)(unsigned long)addr;
188 }
189 
190 static u64 hist_field_pstring(struct hist_field *hist_field,
191 			      struct tracing_map_elt *elt,
192 			      struct ring_buffer_event *rbe,
193 			      void *event)
194 {
195 	char **addr = (char **)(event + hist_field->field->offset);
196 
197 	return (u64)(unsigned long)*addr;
198 }
199 
200 static u64 hist_field_log2(struct hist_field *hist_field,
201 			   struct tracing_map_elt *elt,
202 			   struct ring_buffer_event *rbe,
203 			   void *event)
204 {
205 	struct hist_field *operand = hist_field->operands[0];
206 
207 	u64 val = operand->fn(operand, elt, rbe, event);
208 
209 	return (u64) ilog2(roundup_pow_of_two(val));
210 }
211 
212 static u64 hist_field_plus(struct hist_field *hist_field,
213 			   struct tracing_map_elt *elt,
214 			   struct ring_buffer_event *rbe,
215 			   void *event)
216 {
217 	struct hist_field *operand1 = hist_field->operands[0];
218 	struct hist_field *operand2 = hist_field->operands[1];
219 
220 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
221 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
222 
223 	return val1 + val2;
224 }
225 
226 static u64 hist_field_minus(struct hist_field *hist_field,
227 			    struct tracing_map_elt *elt,
228 			    struct ring_buffer_event *rbe,
229 			    void *event)
230 {
231 	struct hist_field *operand1 = hist_field->operands[0];
232 	struct hist_field *operand2 = hist_field->operands[1];
233 
234 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
235 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
236 
237 	return val1 - val2;
238 }
239 
240 static u64 hist_field_unary_minus(struct hist_field *hist_field,
241 				  struct tracing_map_elt *elt,
242 				  struct ring_buffer_event *rbe,
243 				  void *event)
244 {
245 	struct hist_field *operand = hist_field->operands[0];
246 
247 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
248 	u64 val = (u64)-sval;
249 
250 	return val;
251 }
252 
253 #define DEFINE_HIST_FIELD_FN(type)					\
254 	static u64 hist_field_##type(struct hist_field *hist_field,	\
255 				     struct tracing_map_elt *elt,	\
256 				     struct ring_buffer_event *rbe,	\
257 				     void *event)			\
258 {									\
259 	type *addr = (type *)(event + hist_field->field->offset);	\
260 									\
261 	return (u64)(unsigned long)*addr;				\
262 }
263 
264 DEFINE_HIST_FIELD_FN(s64);
265 DEFINE_HIST_FIELD_FN(u64);
266 DEFINE_HIST_FIELD_FN(s32);
267 DEFINE_HIST_FIELD_FN(u32);
268 DEFINE_HIST_FIELD_FN(s16);
269 DEFINE_HIST_FIELD_FN(u16);
270 DEFINE_HIST_FIELD_FN(s8);
271 DEFINE_HIST_FIELD_FN(u8);
272 
273 #define for_each_hist_field(i, hist_data)	\
274 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
275 
276 #define for_each_hist_val_field(i, hist_data)	\
277 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
278 
279 #define for_each_hist_key_field(i, hist_data)	\
280 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
281 
282 #define HIST_STACKTRACE_DEPTH	16
283 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
284 #define HIST_STACKTRACE_SKIP	5
285 
286 #define HITCOUNT_IDX		0
287 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
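
/*
 * Layout assumed by the iterators above (a sketch derived from how
 * for_each_hist_val_field() and for_each_hist_key_field() split the
 * index range): hist_data->fields[0 .. n_vals - 1] hold the value
 * fields, with the implicit hitcount at HITCOUNT_IDX (0), and
 * fields[n_vals .. n_fields - 1] hold the key fields.
 */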
288 
289 enum hist_field_flags {
290 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
291 	HIST_FIELD_FL_KEY		= 1 << 1,
292 	HIST_FIELD_FL_STRING		= 1 << 2,
293 	HIST_FIELD_FL_HEX		= 1 << 3,
294 	HIST_FIELD_FL_SYM		= 1 << 4,
295 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
296 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
297 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
298 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
299 	HIST_FIELD_FL_LOG2		= 1 << 9,
300 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
301 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
302 	HIST_FIELD_FL_VAR		= 1 << 12,
303 	HIST_FIELD_FL_EXPR		= 1 << 13,
304 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
305 	HIST_FIELD_FL_CPU		= 1 << 15,
306 	HIST_FIELD_FL_ALIAS		= 1 << 16,
307 };
308 
309 struct var_defs {
310 	unsigned int	n_vars;
311 	char		*name[TRACING_MAP_VARS_MAX];
312 	char		*expr[TRACING_MAP_VARS_MAX];
313 };
314 
315 struct hist_trigger_attrs {
316 	char		*keys_str;
317 	char		*vals_str;
318 	char		*sort_key_str;
319 	char		*name;
320 	char		*clock;
321 	bool		pause;
322 	bool		cont;
323 	bool		clear;
324 	bool		ts_in_usecs;
325 	unsigned int	map_bits;
326 
327 	char		*assignment_str[TRACING_MAP_VARS_MAX];
328 	unsigned int	n_assignments;
329 
330 	char		*action_str[HIST_ACTIONS_MAX];
331 	unsigned int	n_actions;
332 
333 	struct var_defs	var_defs;
334 };
335 
336 struct field_var {
337 	struct hist_field	*var;
338 	struct hist_field	*val;
339 };
340 
341 struct field_var_hist {
342 	struct hist_trigger_data	*hist_data;
343 	char				*cmd;
344 };
345 
346 struct hist_trigger_data {
347 	struct hist_field               *fields[HIST_FIELDS_MAX];
348 	unsigned int			n_vals;
349 	unsigned int			n_keys;
350 	unsigned int			n_fields;
351 	unsigned int			n_vars;
352 	unsigned int			key_size;
353 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
354 	unsigned int			n_sort_keys;
355 	struct trace_event_file		*event_file;
356 	struct hist_trigger_attrs	*attrs;
357 	struct tracing_map		*map;
358 	bool				enable_timestamps;
359 	bool				remove;
360 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
361 	unsigned int			n_var_refs;
362 
363 	struct action_data		*actions[HIST_ACTIONS_MAX];
364 	unsigned int			n_actions;
365 
366 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
367 	unsigned int			n_field_vars;
368 	unsigned int			n_field_var_str;
369 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
370 	unsigned int			n_field_var_hists;
371 
372 	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
373 	unsigned int			n_save_vars;
374 	unsigned int			n_save_var_str;
375 };
376 
377 static int synth_event_create(int argc, const char **argv);
378 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
379 static int synth_event_release(struct dyn_event *ev);
380 static bool synth_event_is_busy(struct dyn_event *ev);
381 static bool synth_event_match(const char *system, const char *event,
382 			int argc, const char **argv, struct dyn_event *ev);
383 
384 static struct dyn_event_operations synth_event_ops = {
385 	.create = synth_event_create,
386 	.show = synth_event_show,
387 	.is_busy = synth_event_is_busy,
388 	.free = synth_event_release,
389 	.match = synth_event_match,
390 };
391 
392 struct synth_field {
393 	char *type;
394 	char *name;
395 	size_t size;
396 	bool is_signed;
397 	bool is_string;
398 };
399 
400 struct synth_event {
401 	struct dyn_event			devent;
402 	int					ref;
403 	char					*name;
404 	struct synth_field			**fields;
405 	unsigned int				n_fields;
406 	unsigned int				n_u64;
407 	struct trace_event_class		class;
408 	struct trace_event_call			call;
409 	struct tracepoint			*tp;
410 };
411 
412 static bool is_synth_event(struct dyn_event *ev)
413 {
414 	return ev->ops == &synth_event_ops;
415 }
416 
417 static struct synth_event *to_synth_event(struct dyn_event *ev)
418 {
419 	return container_of(ev, struct synth_event, devent);
420 }
421 
422 static bool synth_event_is_busy(struct dyn_event *ev)
423 {
424 	struct synth_event *event = to_synth_event(ev);
425 
426 	return event->ref != 0;
427 }
428 
429 static bool synth_event_match(const char *system, const char *event,
430 			int argc, const char **argv, struct dyn_event *ev)
431 {
432 	struct synth_event *sev = to_synth_event(ev);
433 
434 	return strcmp(sev->name, event) == 0 &&
435 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
436 }
437 
438 struct action_data;
439 
440 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
441 			     struct tracing_map_elt *elt, void *rec,
442 			     struct ring_buffer_event *rbe, void *key,
443 			     struct action_data *data, u64 *var_ref_vals);
444 
445 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
446 
447 enum handler_id {
448 	HANDLER_ONMATCH = 1,
449 	HANDLER_ONMAX,
450 	HANDLER_ONCHANGE,
451 };
452 
453 enum action_id {
454 	ACTION_SAVE = 1,
455 	ACTION_TRACE,
456 	ACTION_SNAPSHOT,
457 };
458 
459 struct action_data {
460 	enum handler_id		handler;
461 	enum action_id		action;
462 	char			*action_name;
463 	action_fn_t		fn;
464 
465 	unsigned int		n_params;
466 	char			*params[SYNTH_FIELDS_MAX];
467 
468 	/*
469 	 * When a histogram trigger is hit, the values of any
470 	 * references to variables, including variables being passed
471 	 * as parameters to synthetic events, are collected into a
472 	 * var_ref_vals array.  This var_ref_idx is the index of the
473 	 * first param in the array to be passed to the synthetic
474 	 * event invocation.
475 	 */
476 	unsigned int		var_ref_idx;
477 	struct synth_event	*synth_event;
478 	bool			use_trace_keyword;
479 	char			*synth_event_name;
480 
481 	union {
482 		struct {
483 			char			*event;
484 			char			*event_system;
485 		} match_data;
486 
487 		struct {
488 			/*
489 			 * var_str contains the $-unstripped variable
490 			 * name referenced by var_ref, and is used when
491 			 * printing the action.  Because var_ref
492 			 * creation is deferred to create_actions(),
493 			 * we need a per-action way to save it until
494 			 * then, thus var_str.
495 			 */
496 			char			*var_str;
497 
498 			/*
499 			 * var_ref refers to the variable being
500 			 * tracked, e.g. onmax($var).
501 			 */
502 			struct hist_field	*var_ref;
503 
504 			/*
505 			 * track_var contains the 'invisible' tracking
506 			 * variable created to keep the current
507 			 * e.g. max value.
508 			 */
509 			struct hist_field	*track_var;
510 
511 			check_track_val_fn_t	check_val;
512 			action_fn_t		save_data;
513 		} track_data;
514 	};
515 };
516 
517 struct track_data {
518 	u64				track_val;
519 	bool				updated;
520 
521 	unsigned int			key_len;
522 	void				*key;
523 	struct tracing_map_elt		elt;
524 
525 	struct action_data		*action_data;
526 	struct hist_trigger_data	*hist_data;
527 };
528 
529 struct hist_elt_data {
530 	char *comm;
531 	u64 *var_ref_vals;
532 	char *field_var_str[SYNTH_FIELDS_MAX];
533 };
534 
535 struct snapshot_context {
536 	struct tracing_map_elt	*elt;
537 	void			*key;
538 };
539 
540 static void track_data_free(struct track_data *track_data)
541 {
542 	struct hist_elt_data *elt_data;
543 
544 	if (!track_data)
545 		return;
546 
547 	kfree(track_data->key);
548 
549 	elt_data = track_data->elt.private_data;
550 	if (elt_data) {
551 		kfree(elt_data->comm);
552 		kfree(elt_data);
553 	}
554 
555 	kfree(track_data);
556 }
557 
558 static struct track_data *track_data_alloc(unsigned int key_len,
559 					   struct action_data *action_data,
560 					   struct hist_trigger_data *hist_data)
561 {
562 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
563 	struct hist_elt_data *elt_data;
564 
565 	if (!data)
566 		return ERR_PTR(-ENOMEM);
567 
568 	data->key = kzalloc(key_len, GFP_KERNEL);
569 	if (!data->key) {
570 		track_data_free(data);
571 		return ERR_PTR(-ENOMEM);
572 	}
573 
574 	data->key_len = key_len;
575 	data->action_data = action_data;
576 	data->hist_data = hist_data;
577 
578 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
579 	if (!elt_data) {
580 		track_data_free(data);
581 		return ERR_PTR(-ENOMEM);
582 	}
583 	data->elt.private_data = elt_data;
584 
585 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
586 	if (!elt_data->comm) {
587 		track_data_free(data);
588 		return ERR_PTR(-ENOMEM);
589 	}
590 
591 	return data;
592 }
593 
594 static char last_cmd[MAX_FILTER_STR_VAL];
595 static char last_cmd_loc[MAX_FILTER_STR_VAL];
596 
597 static int errpos(char *str)
598 {
599 	return err_pos(last_cmd, str);
600 }
601 
602 static void last_cmd_set(struct trace_event_file *file, char *str)
603 {
604 	const char *system = NULL, *name = NULL;
605 	struct trace_event_call *call;
606 
607 	if (!str)
608 		return;
609 
610 	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
611 
612 	if (file) {
613 		call = file->event_call;
614 
615 		system = call->class->system;
616 		if (system) {
617 			name = trace_event_name(call);
618 			if (!name)
619 				system = NULL;
620 		}
621 	}
622 
623 	if (system)
624 		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
625 }
626 
627 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
628 {
629 	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
630 			err_type, err_pos);
631 }
632 
633 static void hist_err_clear(void)
634 {
635 	last_cmd[0] = '\0';
636 	last_cmd_loc[0] = '\0';
637 }
638 
639 struct synth_trace_event {
640 	struct trace_entry	ent;
641 	u64			fields[];
642 };
643 
644 static int synth_event_define_fields(struct trace_event_call *call)
645 {
646 	struct synth_trace_event trace;
647 	int offset = offsetof(typeof(trace), fields);
648 	struct synth_event *event = call->data;
649 	unsigned int i, size, n_u64;
650 	char *name, *type;
651 	bool is_signed;
652 	int ret = 0;
653 
654 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
655 		size = event->fields[i]->size;
656 		is_signed = event->fields[i]->is_signed;
657 		type = event->fields[i]->type;
658 		name = event->fields[i]->name;
659 		ret = trace_define_field(call, type, name, offset, size,
660 					 is_signed, FILTER_OTHER);
661 		if (ret)
662 			break;
663 
664 		if (event->fields[i]->is_string) {
665 			offset += STR_VAR_LEN_MAX;
666 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
667 		} else {
668 			offset += sizeof(u64);
669 			n_u64++;
670 		}
671 	}
672 
673 	event->n_u64 = n_u64;
674 
675 	return ret;
676 }
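
/*
 * Example layout (hypothetical event defined as "u64 lat; char msg[16]"):
 * lat takes a single u64 slot of synth_trace_event.fields[], while the
 * string, regardless of its declared size, is laid out over
 * STR_VAR_LEN_MAX (32) bytes, i.e. four u64 slots, so n_u64 ends up as 5.
 */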
677 
678 static bool synth_field_signed(char *type)
679 {
680 	if (str_has_prefix(type, "u"))
681 		return false;
682 
683 	return true;
684 }
685 
686 static int synth_field_is_string(char *type)
687 {
688 	if (strstr(type, "char[") != NULL)
689 		return true;
690 
691 	return false;
692 }
693 
694 static int synth_field_string_size(char *type)
695 {
696 	char buf[4], *end, *start;
697 	unsigned int len;
698 	int size, err;
699 
700 	start = strstr(type, "char[");
701 	if (start == NULL)
702 		return -EINVAL;
703 	start += sizeof("char[") - 1;
704 
705 	end = strchr(type, ']');
706 	if (!end || end < start)
707 		return -EINVAL;
708 
709 	len = end - start;
710 	if (len > 3)
711 		return -EINVAL;
712 
713 	strncpy(buf, start, len);
714 	buf[len] = '\0';
715 
716 	err = kstrtouint(buf, 0, &size);
717 	if (err)
718 		return err;
719 
720 	if (size > STR_VAR_LEN_MAX)
721 		return -EINVAL;
722 
723 	return size;
724 }
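
/*
 * E.g. "char[16]" parses to a size of 16, while "char[64]" is rejected
 * with -EINVAL because 64 exceeds STR_VAR_LEN_MAX (32); more than three
 * digits between the brackets is likewise rejected.
 */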
725 
726 static int synth_field_size(char *type)
727 {
728 	int size = 0;
729 
730 	if (strcmp(type, "s64") == 0)
731 		size = sizeof(s64);
732 	else if (strcmp(type, "u64") == 0)
733 		size = sizeof(u64);
734 	else if (strcmp(type, "s32") == 0)
735 		size = sizeof(s32);
736 	else if (strcmp(type, "u32") == 0)
737 		size = sizeof(u32);
738 	else if (strcmp(type, "s16") == 0)
739 		size = sizeof(s16);
740 	else if (strcmp(type, "u16") == 0)
741 		size = sizeof(u16);
742 	else if (strcmp(type, "s8") == 0)
743 		size = sizeof(s8);
744 	else if (strcmp(type, "u8") == 0)
745 		size = sizeof(u8);
746 	else if (strcmp(type, "char") == 0)
747 		size = sizeof(char);
748 	else if (strcmp(type, "unsigned char") == 0)
749 		size = sizeof(unsigned char);
750 	else if (strcmp(type, "int") == 0)
751 		size = sizeof(int);
752 	else if (strcmp(type, "unsigned int") == 0)
753 		size = sizeof(unsigned int);
754 	else if (strcmp(type, "long") == 0)
755 		size = sizeof(long);
756 	else if (strcmp(type, "unsigned long") == 0)
757 		size = sizeof(unsigned long);
758 	else if (strcmp(type, "pid_t") == 0)
759 		size = sizeof(pid_t);
760 	else if (strcmp(type, "gfp_t") == 0)
761 		size = sizeof(gfp_t);
762 	else if (synth_field_is_string(type))
763 		size = synth_field_string_size(type);
764 
765 	return size;
766 }
767 
768 static const char *synth_field_fmt(char *type)
769 {
770 	const char *fmt = "%llu";
771 
772 	if (strcmp(type, "s64") == 0)
773 		fmt = "%lld";
774 	else if (strcmp(type, "u64") == 0)
775 		fmt = "%llu";
776 	else if (strcmp(type, "s32") == 0)
777 		fmt = "%d";
778 	else if (strcmp(type, "u32") == 0)
779 		fmt = "%u";
780 	else if (strcmp(type, "s16") == 0)
781 		fmt = "%d";
782 	else if (strcmp(type, "u16") == 0)
783 		fmt = "%u";
784 	else if (strcmp(type, "s8") == 0)
785 		fmt = "%d";
786 	else if (strcmp(type, "u8") == 0)
787 		fmt = "%u";
788 	else if (strcmp(type, "char") == 0)
789 		fmt = "%d";
790 	else if (strcmp(type, "unsigned char") == 0)
791 		fmt = "%u";
792 	else if (strcmp(type, "int") == 0)
793 		fmt = "%d";
794 	else if (strcmp(type, "unsigned int") == 0)
795 		fmt = "%u";
796 	else if (strcmp(type, "long") == 0)
797 		fmt = "%ld";
798 	else if (strcmp(type, "unsigned long") == 0)
799 		fmt = "%lu";
800 	else if (strcmp(type, "pid_t") == 0)
801 		fmt = "%d";
802 	else if (strcmp(type, "gfp_t") == 0)
803 		fmt = "%x";
804 	else if (synth_field_is_string(type))
805 		fmt = "%s";
806 
807 	return fmt;
808 }
809 
810 static enum print_line_t print_synth_event(struct trace_iterator *iter,
811 					   int flags,
812 					   struct trace_event *event)
813 {
814 	struct trace_array *tr = iter->tr;
815 	struct trace_seq *s = &iter->seq;
816 	struct synth_trace_event *entry;
817 	struct synth_event *se;
818 	unsigned int i, n_u64;
819 	char print_fmt[32];
820 	const char *fmt;
821 
822 	entry = (struct synth_trace_event *)iter->ent;
823 	se = container_of(event, struct synth_event, call.event);
824 
825 	trace_seq_printf(s, "%s: ", se->name);
826 
827 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
828 		if (trace_seq_has_overflowed(s))
829 			goto end;
830 
831 		fmt = synth_field_fmt(se->fields[i]->type);
832 
833 		/* parameter types */
834 		if (tr->trace_flags & TRACE_ITER_VERBOSE)
835 			trace_seq_printf(s, "%s ", fmt);
836 
837 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
838 
839 		/* parameter values */
840 		if (se->fields[i]->is_string) {
841 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
842 					 (char *)&entry->fields[n_u64],
843 					 i == se->n_fields - 1 ? "" : " ");
844 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
845 		} else {
846 			struct trace_print_flags __flags[] = {
847 			    __def_gfpflag_names, {-1, NULL} };
848 
849 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
850 					 entry->fields[n_u64],
851 					 i == se->n_fields - 1 ? "" : " ");
852 
853 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
854 				trace_seq_puts(s, " (");
855 				trace_print_flags_seq(s, "|",
856 						      entry->fields[n_u64],
857 						      __flags);
858 				trace_seq_putc(s, ')');
859 			}
860 			n_u64++;
861 		}
862 	}
863 end:
864 	trace_seq_putc(s, '\n');
865 
866 	return trace_handle_return(s);
867 }
868 
869 static struct trace_event_functions synth_event_funcs = {
870 	.trace		= print_synth_event
871 };
872 
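/*
 * Probe attached to a synthetic event's tracepoint: it copies the
 * already-resolved variable values, starting at var_ref_vals[var_ref_idx],
 * into the event's u64/string field slots and commits the record, nesting
 * the ring buffer so the write isn't treated as recursion from the
 * triggering event.
 */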
873 static notrace void trace_event_raw_event_synth(void *__data,
874 						u64 *var_ref_vals,
875 						unsigned int var_ref_idx)
876 {
877 	struct trace_event_file *trace_file = __data;
878 	struct synth_trace_event *entry;
879 	struct trace_event_buffer fbuffer;
880 	struct ring_buffer *buffer;
881 	struct synth_event *event;
882 	unsigned int i, n_u64;
883 	int fields_size = 0;
884 
885 	event = trace_file->event_call->data;
886 
887 	if (trace_trigger_soft_disabled(trace_file))
888 		return;
889 
890 	fields_size = event->n_u64 * sizeof(u64);
891 
892 	/*
893 	 * Avoid ring buffer recursion detection, as this event
894 	 * is being performed within another event.
895 	 */
896 	buffer = trace_file->tr->trace_buffer.buffer;
897 	ring_buffer_nest_start(buffer);
898 
899 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
900 					   sizeof(*entry) + fields_size);
901 	if (!entry)
902 		goto out;
903 
904 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
905 		if (event->fields[i]->is_string) {
906 			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
907 			char *str_field = (char *)&entry->fields[n_u64];
908 
909 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
910 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
911 		} else {
912 			entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
913 			n_u64++;
914 		}
915 	}
916 
917 	trace_event_buffer_commit(&fbuffer);
918 out:
919 	ring_buffer_nest_end(buffer);
920 }
921 
922 static void free_synth_event_print_fmt(struct trace_event_call *call)
923 {
924 	if (call) {
925 		kfree(call->print_fmt);
926 		call->print_fmt = NULL;
927 	}
928 }
929 
930 static int __set_synth_event_print_fmt(struct synth_event *event,
931 				       char *buf, int len)
932 {
933 	const char *fmt;
934 	int pos = 0;
935 	int i;
936 
937 	/* When len=0, we just calculate the needed length */
938 #define LEN_OR_ZERO (len ? len - pos : 0)
939 
940 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
941 	for (i = 0; i < event->n_fields; i++) {
942 		fmt = synth_field_fmt(event->fields[i]->type);
943 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
944 				event->fields[i]->name, fmt,
945 				i == event->n_fields - 1 ? "" : ", ");
946 	}
947 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
948 
949 	for (i = 0; i < event->n_fields; i++) {
950 		pos += snprintf(buf + pos, LEN_OR_ZERO,
951 				", REC->%s", event->fields[i]->name);
952 	}
953 
954 #undef LEN_OR_ZERO
955 
956 	/* return the length of print_fmt */
957 	return pos;
958 }
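
/*
 * For a hypothetical two-field event "u64 lat; char comm[16]", the
 * print_fmt built above would read:
 *
 *   "lat=%llu, comm=%s", REC->lat, REC->comm
 */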
959 
960 static int set_synth_event_print_fmt(struct trace_event_call *call)
961 {
962 	struct synth_event *event = call->data;
963 	char *print_fmt;
964 	int len;
965 
966 	/* First: called with 0 length to calculate the needed length */
967 	len = __set_synth_event_print_fmt(event, NULL, 0);
968 
969 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
970 	if (!print_fmt)
971 		return -ENOMEM;
972 
973 	/* Second: actually write the @print_fmt */
974 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
975 	call->print_fmt = print_fmt;
976 
977 	return 0;
978 }
979 
980 static void free_synth_field(struct synth_field *field)
981 {
982 	kfree(field->type);
983 	kfree(field->name);
984 	kfree(field);
985 }
986 
987 static struct synth_field *parse_synth_field(int argc, const char **argv,
988 					     int *consumed)
989 {
990 	struct synth_field *field;
991 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
992 	int len, ret = 0;
993 
994 	if (field_type[0] == ';')
995 		field_type++;
996 
997 	if (!strcmp(field_type, "unsigned")) {
998 		if (argc < 3)
999 			return ERR_PTR(-EINVAL);
1000 		prefix = "unsigned ";
1001 		field_type = argv[1];
1002 		field_name = argv[2];
1003 		*consumed = 3;
1004 	} else {
1005 		field_name = argv[1];
1006 		*consumed = 2;
1007 	}
1008 
1009 	field = kzalloc(sizeof(*field), GFP_KERNEL);
1010 	if (!field)
1011 		return ERR_PTR(-ENOMEM);
1012 
1013 	len = strlen(field_name);
1014 	array = strchr(field_name, '[');
1015 	if (array)
1016 		len -= strlen(array);
1017 	else if (field_name[len - 1] == ';')
1018 		len--;
1019 
1020 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1021 	if (!field->name) {
1022 		ret = -ENOMEM;
1023 		goto free;
1024 	}
1025 
1026 	if (field_type[0] == ';')
1027 		field_type++;
1028 	len = strlen(field_type) + 1;
1029 	if (array)
1030 		len += strlen(array);
1031 	if (prefix)
1032 		len += strlen(prefix);
1033 
1034 	field->type = kzalloc(len, GFP_KERNEL);
1035 	if (!field->type) {
1036 		ret = -ENOMEM;
1037 		goto free;
1038 	}
1039 	if (prefix)
1040 		strcat(field->type, prefix);
1041 	strcat(field->type, field_type);
1042 	if (array) {
1043 		strcat(field->type, array);
1044 		if (field->type[len - 1] == ';')
1045 			field->type[len - 1] = '\0';
1046 	}
1047 
1048 	field->size = synth_field_size(field->type);
1049 	if (!field->size) {
1050 		ret = -EINVAL;
1051 		goto free;
1052 	}
1053 
1054 	if (synth_field_is_string(field->type))
1055 		field->is_string = true;
1056 
1057 	field->is_signed = synth_field_signed(field->type);
1058 
1059  out:
1060 	return field;
1061  free:
1062 	free_synth_field(field);
1063 	field = ERR_PTR(ret);
1064 	goto out;
1065 }
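
/*
 * Illustrative inputs (field names hypothetical): "u64 lat;" yields type
 * "u64" and name "lat"; "unsigned int count;" consumes three argv entries
 * and yields type "unsigned int"; "char msg[16];" keeps the array part in
 * the type ("char[16]") and strips it from the name.
 */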
1066 
1067 static void free_synth_tracepoint(struct tracepoint *tp)
1068 {
1069 	if (!tp)
1070 		return;
1071 
1072 	kfree(tp->name);
1073 	kfree(tp);
1074 }
1075 
1076 static struct tracepoint *alloc_synth_tracepoint(char *name)
1077 {
1078 	struct tracepoint *tp;
1079 
1080 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1081 	if (!tp)
1082 		return ERR_PTR(-ENOMEM);
1083 
1084 	tp->name = kstrdup(name, GFP_KERNEL);
1085 	if (!tp->name) {
1086 		kfree(tp);
1087 		return ERR_PTR(-ENOMEM);
1088 	}
1089 
1090 	return tp;
1091 }
1092 
1093 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1094 				    unsigned int var_ref_idx);
1095 
1096 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1097 			       unsigned int var_ref_idx)
1098 {
1099 	struct tracepoint *tp = event->tp;
1100 
1101 	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1102 		struct tracepoint_func *probe_func_ptr;
1103 		synth_probe_func_t probe_func;
1104 		void *__data;
1105 
1106 		if (!(cpu_online(raw_smp_processor_id())))
1107 			return;
1108 
1109 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1110 		if (probe_func_ptr) {
1111 			do {
1112 				probe_func = probe_func_ptr->func;
1113 				__data = probe_func_ptr->data;
1114 				probe_func(__data, var_ref_vals, var_ref_idx);
1115 			} while ((++probe_func_ptr)->func);
1116 		}
1117 	}
1118 }
1119 
1120 static struct synth_event *find_synth_event(const char *name)
1121 {
1122 	struct dyn_event *pos;
1123 	struct synth_event *event;
1124 
1125 	for_each_dyn_event(pos) {
1126 		if (!is_synth_event(pos))
1127 			continue;
1128 		event = to_synth_event(pos);
1129 		if (strcmp(event->name, name) == 0)
1130 			return event;
1131 	}
1132 
1133 	return NULL;
1134 }
1135 
1136 static int register_synth_event(struct synth_event *event)
1137 {
1138 	struct trace_event_call *call = &event->call;
1139 	int ret = 0;
1140 
1141 	event->call.class = &event->class;
1142 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1143 	if (!event->class.system) {
1144 		ret = -ENOMEM;
1145 		goto out;
1146 	}
1147 
1148 	event->tp = alloc_synth_tracepoint(event->name);
1149 	if (IS_ERR(event->tp)) {
1150 		ret = PTR_ERR(event->tp);
1151 		event->tp = NULL;
1152 		goto out;
1153 	}
1154 
1155 	INIT_LIST_HEAD(&call->class->fields);
1156 	call->event.funcs = &synth_event_funcs;
1157 	call->class->define_fields = synth_event_define_fields;
1158 
1159 	ret = register_trace_event(&call->event);
1160 	if (!ret) {
1161 		ret = -ENODEV;
1162 		goto out;
1163 	}
1164 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
1165 	call->class->reg = trace_event_reg;
1166 	call->class->probe = trace_event_raw_event_synth;
1167 	call->data = event;
1168 	call->tp = event->tp;
1169 
1170 	ret = trace_add_event_call(call);
1171 	if (ret) {
1172 		pr_warn("Failed to register synthetic event: %s\n",
1173 			trace_event_name(call));
1174 		goto err;
1175 	}
1176 
1177 	ret = set_synth_event_print_fmt(call);
1178 	if (ret < 0) {
1179 		trace_remove_event_call(call);
1180 		goto err;
1181 	}
1182  out:
1183 	return ret;
1184  err:
1185 	unregister_trace_event(&call->event);
1186 	goto out;
1187 }
1188 
1189 static int unregister_synth_event(struct synth_event *event)
1190 {
1191 	struct trace_event_call *call = &event->call;
1192 	int ret;
1193 
1194 	ret = trace_remove_event_call(call);
1195 
1196 	return ret;
1197 }
1198 
1199 static void free_synth_event(struct synth_event *event)
1200 {
1201 	unsigned int i;
1202 
1203 	if (!event)
1204 		return;
1205 
1206 	for (i = 0; i < event->n_fields; i++)
1207 		free_synth_field(event->fields[i]);
1208 
1209 	kfree(event->fields);
1210 	kfree(event->name);
1211 	kfree(event->class.system);
1212 	free_synth_tracepoint(event->tp);
1213 	free_synth_event_print_fmt(&event->call);
1214 	kfree(event);
1215 }
1216 
1217 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1218 					     struct synth_field **fields)
1219 {
1220 	struct synth_event *event;
1221 	unsigned int i;
1222 
1223 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1224 	if (!event) {
1225 		event = ERR_PTR(-ENOMEM);
1226 		goto out;
1227 	}
1228 
1229 	event->name = kstrdup(name, GFP_KERNEL);
1230 	if (!event->name) {
1231 		kfree(event);
1232 		event = ERR_PTR(-ENOMEM);
1233 		goto out;
1234 	}
1235 
1236 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1237 	if (!event->fields) {
1238 		free_synth_event(event);
1239 		event = ERR_PTR(-ENOMEM);
1240 		goto out;
1241 	}
1242 
1243 	dyn_event_init(&event->devent, &synth_event_ops);
1244 
1245 	for (i = 0; i < n_fields; i++)
1246 		event->fields[i] = fields[i];
1247 
1248 	event->n_fields = n_fields;
1249  out:
1250 	return event;
1251 }
1252 
1253 static void action_trace(struct hist_trigger_data *hist_data,
1254 			 struct tracing_map_elt *elt, void *rec,
1255 			 struct ring_buffer_event *rbe, void *key,
1256 			 struct action_data *data, u64 *var_ref_vals)
1257 {
1258 	struct synth_event *event = data->synth_event;
1259 
1260 	trace_synth(event, var_ref_vals, data->var_ref_idx);
1261 }
1262 
1263 struct hist_var_data {
1264 	struct list_head list;
1265 	struct hist_trigger_data *hist_data;
1266 };
1267 
1268 static int __create_synth_event(int argc, const char *name, const char **argv)
1269 {
1270 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1271 	struct synth_event *event = NULL;
1272 	int i, consumed = 0, n_fields = 0, ret = 0;
1273 
1274 	/*
1275 	 * Argument syntax:
1276 	 *  - Add synthetic event: <event_name> field[;field] ...
1277 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1278 	 *      where 'field' = type field_name
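	 *
	 *  e.g. (illustrative; syntax per Documentation/trace/histogram.rst):
	 *    echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
	 *    echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events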
1279 	 */
1280 
1281 	if (name[0] == '\0' || argc < 1)
1282 		return -EINVAL;
1283 
1284 	mutex_lock(&event_mutex);
1285 
1286 	event = find_synth_event(name);
1287 	if (event) {
1288 		ret = -EEXIST;
1289 		goto out;
1290 	}
1291 
1292 	for (i = 0; i < argc - 1; i++) {
1293 		if (strcmp(argv[i], ";") == 0)
1294 			continue;
1295 		if (n_fields == SYNTH_FIELDS_MAX) {
1296 			ret = -EINVAL;
1297 			goto err;
1298 		}
1299 
1300 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1301 		if (IS_ERR(field)) {
1302 			ret = PTR_ERR(field);
1303 			goto err;
1304 		}
1305 		fields[n_fields++] = field;
1306 		i += consumed - 1;
1307 	}
1308 
1309 	if (i < argc && strcmp(argv[i], ";") != 0) {
1310 		ret = -EINVAL;
1311 		goto err;
1312 	}
1313 
1314 	event = alloc_synth_event(name, n_fields, fields);
1315 	if (IS_ERR(event)) {
1316 		ret = PTR_ERR(event);
1317 		event = NULL;
1318 		goto err;
1319 	}
1320 	ret = register_synth_event(event);
1321 	if (!ret)
1322 		dyn_event_add(&event->devent);
1323 	else
1324 		free_synth_event(event);
1325  out:
1326 	mutex_unlock(&event_mutex);
1327 
1328 	return ret;
1329  err:
1330 	for (i = 0; i < n_fields; i++)
1331 		free_synth_field(fields[i]);
1332 
1333 	goto out;
1334 }
1335 
1336 static int create_or_delete_synth_event(int argc, char **argv)
1337 {
1338 	const char *name = argv[0];
1339 	struct synth_event *event = NULL;
1340 	int ret;
1341 
1342 	/* trace_run_command() ensures argc != 0 */
1343 	if (name[0] == '!') {
1344 		mutex_lock(&event_mutex);
1345 		event = find_synth_event(name + 1);
1346 		if (event) {
1347 			if (event->ref)
1348 				ret = -EBUSY;
1349 			else {
1350 				ret = unregister_synth_event(event);
1351 				if (!ret) {
1352 					dyn_event_remove(&event->devent);
1353 					free_synth_event(event);
1354 				}
1355 			}
1356 		} else
1357 			ret = -ENOENT;
1358 		mutex_unlock(&event_mutex);
1359 		return ret;
1360 	}
1361 
1362 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1363 	return ret == -ECANCELED ? -EINVAL : ret;
1364 }
1365 
1366 static int synth_event_create(int argc, const char **argv)
1367 {
1368 	const char *name = argv[0];
1369 	int len;
1370 
1371 	if (name[0] != 's' || name[1] != ':')
1372 		return -ECANCELED;
1373 	name += 2;
1374 
1375 	/* This interface accepts group name prefix */
1376 	if (strchr(name, '/')) {
1377 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
1378 		if (len == 0)
1379 			return -EINVAL;
1380 		name += len;
1381 	}
1382 	return __create_synth_event(argc - 1, name, argv + 1);
1383 }
1384 
1385 static int synth_event_release(struct dyn_event *ev)
1386 {
1387 	struct synth_event *event = to_synth_event(ev);
1388 	int ret;
1389 
1390 	if (event->ref)
1391 		return -EBUSY;
1392 
1393 	ret = unregister_synth_event(event);
1394 	if (ret)
1395 		return ret;
1396 
1397 	dyn_event_remove(ev);
1398 	free_synth_event(event);
1399 	return 0;
1400 }
1401 
1402 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1403 {
1404 	struct synth_field *field;
1405 	unsigned int i;
1406 
1407 	seq_printf(m, "%s\t", event->name);
1408 
1409 	for (i = 0; i < event->n_fields; i++) {
1410 		field = event->fields[i];
1411 
1412 		/* field type and name */
1413 		seq_printf(m, "%s %s%s", field->type, field->name,
1414 			   i == event->n_fields - 1 ? "" : "; ");
1415 	}
1416 
1417 	seq_putc(m, '\n');
1418 
1419 	return 0;
1420 }
1421 
1422 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1423 {
1424 	struct synth_event *event = to_synth_event(ev);
1425 
1426 	seq_printf(m, "s:%s/", event->class.system);
1427 
1428 	return __synth_event_show(m, event);
1429 }
1430 
1431 static int synth_events_seq_show(struct seq_file *m, void *v)
1432 {
1433 	struct dyn_event *ev = v;
1434 
1435 	if (!is_synth_event(ev))
1436 		return 0;
1437 
1438 	return __synth_event_show(m, to_synth_event(ev));
1439 }
1440 
1441 static const struct seq_operations synth_events_seq_op = {
1442 	.start	= dyn_event_seq_start,
1443 	.next	= dyn_event_seq_next,
1444 	.stop	= dyn_event_seq_stop,
1445 	.show	= synth_events_seq_show,
1446 };
1447 
1448 static int synth_events_open(struct inode *inode, struct file *file)
1449 {
1450 	int ret;
1451 
1452 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1453 	if (ret)
1454 		return ret;
1455 
1456 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1457 		ret = dyn_events_release_all(&synth_event_ops);
1458 		if (ret < 0)
1459 			return ret;
1460 	}
1461 
1462 	return seq_open(file, &synth_events_seq_op);
1463 }
1464 
1465 static ssize_t synth_events_write(struct file *file,
1466 				  const char __user *buffer,
1467 				  size_t count, loff_t *ppos)
1468 {
1469 	return trace_parse_run_command(file, buffer, count, ppos,
1470 				       create_or_delete_synth_event);
1471 }
1472 
1473 static const struct file_operations synth_events_fops = {
1474 	.open           = synth_events_open,
1475 	.write		= synth_events_write,
1476 	.read           = seq_read,
1477 	.llseek         = seq_lseek,
1478 	.release        = seq_release,
1479 };
1480 
1481 static u64 hist_field_timestamp(struct hist_field *hist_field,
1482 				struct tracing_map_elt *elt,
1483 				struct ring_buffer_event *rbe,
1484 				void *event)
1485 {
1486 	struct hist_trigger_data *hist_data = hist_field->hist_data;
1487 	struct trace_array *tr = hist_data->event_file->tr;
1488 
1489 	u64 ts = ring_buffer_event_time_stamp(rbe);
1490 
1491 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1492 		ts = ns2usecs(ts);
1493 
1494 	return ts;
1495 }
1496 
1497 static u64 hist_field_cpu(struct hist_field *hist_field,
1498 			  struct tracing_map_elt *elt,
1499 			  struct ring_buffer_event *rbe,
1500 			  void *event)
1501 {
1502 	int cpu = smp_processor_id();
1503 
1504 	return cpu;
1505 }
1506 
1507 /**
1508  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1509  * @hist_field: The VAR_REF field to check
1510  * @var_data: The hist trigger that owns the variable
1511  * @var_idx: The trigger variable identifier
1512  *
1513  * Check the given VAR_REF field to see whether or not it references
1514  * the given variable associated with the given trigger.
1515  *
1516  * Return: The VAR_REF field if it does reference the variable, NULL if not
1517  */
1518 static struct hist_field *
1519 check_field_for_var_ref(struct hist_field *hist_field,
1520 			struct hist_trigger_data *var_data,
1521 			unsigned int var_idx)
1522 {
1523 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1524 
1525 	if (hist_field && hist_field->var.idx == var_idx &&
1526 	    hist_field->var.hist_data == var_data)
1527 		return hist_field;
1528 
1529 	return NULL;
1530 }
1531 
1532 /**
1533  * find_var_ref - Check if a trigger has a reference to a trigger variable
1534  * @hist_data: The hist trigger that might have a reference to the variable
1535  * @var_data: The hist trigger that owns the variable
1536  * @var_idx: The trigger variable identifier
1537  *
1538  * Check the list of var_refs[] on the first hist trigger to see
1539  * whether any of them are references to the variable on the second
1540  * trigger.
1541  *
1542  * Return: The VAR_REF field referencing the variable if so, NULL if not
1543  */
1544 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1545 				       struct hist_trigger_data *var_data,
1546 				       unsigned int var_idx)
1547 {
1548 	struct hist_field *hist_field;
1549 	unsigned int i;
1550 
1551 	for (i = 0; i < hist_data->n_var_refs; i++) {
1552 		hist_field = hist_data->var_refs[i];
1553 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
1554 			return hist_field;
1555 	}
1556 
1557 	return NULL;
1558 }
1559 
1560 /**
1561  * find_any_var_ref - Check if there is a reference to a given trigger variable
1562  * @hist_data: The hist trigger
1563  * @var_idx: The trigger variable identifier
1564  *
1565  * Check to see whether the given variable is currently referenced by
1566  * any other trigger.
1567  *
1568  * The trigger the variable is defined on is explicitly excluded - the
1569  * assumption being that a self-reference doesn't prevent a trigger
1570  * from being removed.
1571  *
1572  * Return: The VAR_REF field referencing the variable if so, NULL if not
1573  */
1574 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1575 					   unsigned int var_idx)
1576 {
1577 	struct trace_array *tr = hist_data->event_file->tr;
1578 	struct hist_field *found = NULL;
1579 	struct hist_var_data *var_data;
1580 
1581 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1582 		if (var_data->hist_data == hist_data)
1583 			continue;
1584 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1585 		if (found)
1586 			break;
1587 	}
1588 
1589 	return found;
1590 }
1591 
1592 /**
1593  * check_var_refs - Check if there is a reference to any of trigger's variables
1594  * @hist_data: The hist trigger
1595  *
1596  * A trigger can define one or more variables.  If any one of them is
1597  * currently referenced by any other trigger, this function will
1598  * determine that.
1599  *
1600  * Typically used to determine whether or not a trigger can be removed
1601  * - if there are any references to a trigger's variables, it cannot.
1602  *
1603  * Return: True if there is a reference to any of trigger's variables
1604  */
1605 static bool check_var_refs(struct hist_trigger_data *hist_data)
1606 {
1607 	struct hist_field *field;
1608 	bool found = false;
1609 	int i;
1610 
1611 	for_each_hist_field(i, hist_data) {
1612 		field = hist_data->fields[i];
1613 		if (field && field->flags & HIST_FIELD_FL_VAR) {
1614 			if (find_any_var_ref(hist_data, field->var.idx)) {
1615 				found = true;
1616 				break;
1617 			}
1618 		}
1619 	}
1620 
1621 	return found;
1622 }
1623 
1624 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1625 {
1626 	struct trace_array *tr = hist_data->event_file->tr;
1627 	struct hist_var_data *var_data, *found = NULL;
1628 
1629 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1630 		if (var_data->hist_data == hist_data) {
1631 			found = var_data;
1632 			break;
1633 		}
1634 	}
1635 
1636 	return found;
1637 }
1638 
1639 static bool field_has_hist_vars(struct hist_field *hist_field,
1640 				unsigned int level)
1641 {
1642 	int i;
1643 
1644 	if (level > 3)
1645 		return false;
1646 
1647 	if (!hist_field)
1648 		return false;
1649 
1650 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1651 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1652 		return true;
1653 
1654 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1655 		struct hist_field *operand;
1656 
1657 		operand = hist_field->operands[i];
1658 		if (field_has_hist_vars(operand, level + 1))
1659 			return true;
1660 	}
1661 
1662 	return false;
1663 }
1664 
1665 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1666 {
1667 	struct hist_field *hist_field;
1668 	int i;
1669 
1670 	for_each_hist_field(i, hist_data) {
1671 		hist_field = hist_data->fields[i];
1672 		if (field_has_hist_vars(hist_field, 0))
1673 			return true;
1674 	}
1675 
1676 	return false;
1677 }
1678 
1679 static int save_hist_vars(struct hist_trigger_data *hist_data)
1680 {
1681 	struct trace_array *tr = hist_data->event_file->tr;
1682 	struct hist_var_data *var_data;
1683 
1684 	var_data = find_hist_vars(hist_data);
1685 	if (var_data)
1686 		return 0;
1687 
1688 	if (tracing_check_open_get_tr(tr))
1689 		return -ENODEV;
1690 
1691 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1692 	if (!var_data) {
1693 		trace_array_put(tr);
1694 		return -ENOMEM;
1695 	}
1696 
1697 	var_data->hist_data = hist_data;
1698 	list_add(&var_data->list, &tr->hist_vars);
1699 
1700 	return 0;
1701 }
1702 
1703 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1704 {
1705 	struct trace_array *tr = hist_data->event_file->tr;
1706 	struct hist_var_data *var_data;
1707 
1708 	var_data = find_hist_vars(hist_data);
1709 	if (!var_data)
1710 		return;
1711 
1712 	if (WARN_ON(check_var_refs(hist_data)))
1713 		return;
1714 
1715 	list_del(&var_data->list);
1716 
1717 	kfree(var_data);
1718 
1719 	trace_array_put(tr);
1720 }
1721 
1722 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1723 					 const char *var_name)
1724 {
1725 	struct hist_field *hist_field, *found = NULL;
1726 	int i;
1727 
1728 	for_each_hist_field(i, hist_data) {
1729 		hist_field = hist_data->fields[i];
1730 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1731 		    strcmp(hist_field->var.name, var_name) == 0) {
1732 			found = hist_field;
1733 			break;
1734 		}
1735 	}
1736 
1737 	return found;
1738 }
1739 
1740 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1741 				   struct trace_event_file *file,
1742 				   const char *var_name)
1743 {
1744 	struct hist_trigger_data *test_data;
1745 	struct event_trigger_data *test;
1746 	struct hist_field *hist_field;
1747 
1748 	hist_field = find_var_field(hist_data, var_name);
1749 	if (hist_field)
1750 		return hist_field;
1751 
1752 	list_for_each_entry_rcu(test, &file->triggers, list) {
1753 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1754 			test_data = test->private_data;
1755 			hist_field = find_var_field(test_data, var_name);
1756 			if (hist_field)
1757 				return hist_field;
1758 		}
1759 	}
1760 
1761 	return NULL;
1762 }
1763 
1764 static struct trace_event_file *find_var_file(struct trace_array *tr,
1765 					      char *system,
1766 					      char *event_name,
1767 					      char *var_name)
1768 {
1769 	struct hist_trigger_data *var_hist_data;
1770 	struct hist_var_data *var_data;
1771 	struct trace_event_file *file, *found = NULL;
1772 
1773 	if (system)
1774 		return find_event_file(tr, system, event_name);
1775 
1776 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1777 		var_hist_data = var_data->hist_data;
1778 		file = var_hist_data->event_file;
1779 		if (file == found)
1780 			continue;
1781 
1782 		if (find_var_field(var_hist_data, var_name)) {
1783 			if (found) {
1784 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1785 				return NULL;
1786 			}
1787 
1788 			found = file;
1789 		}
1790 	}
1791 
1792 	return found;
1793 }
1794 
1795 static struct hist_field *find_file_var(struct trace_event_file *file,
1796 					const char *var_name)
1797 {
1798 	struct hist_trigger_data *test_data;
1799 	struct event_trigger_data *test;
1800 	struct hist_field *hist_field;
1801 
1802 	list_for_each_entry_rcu(test, &file->triggers, list) {
1803 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1804 			test_data = test->private_data;
1805 			hist_field = find_var_field(test_data, var_name);
1806 			if (hist_field)
1807 				return hist_field;
1808 		}
1809 	}
1810 
1811 	return NULL;
1812 }
1813 
1814 static struct hist_field *
1815 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1816 {
1817 	struct trace_array *tr = hist_data->event_file->tr;
1818 	struct hist_field *hist_field, *found = NULL;
1819 	struct trace_event_file *file;
1820 	unsigned int i;
1821 
1822 	for (i = 0; i < hist_data->n_actions; i++) {
1823 		struct action_data *data = hist_data->actions[i];
1824 
1825 		if (data->handler == HANDLER_ONMATCH) {
1826 			char *system = data->match_data.event_system;
1827 			char *event_name = data->match_data.event;
1828 
1829 			file = find_var_file(tr, system, event_name, var_name);
1830 			if (!file)
1831 				continue;
1832 			hist_field = find_file_var(file, var_name);
1833 			if (hist_field) {
1834 				if (found) {
1835 					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1836 						 errpos(var_name));
1837 					return ERR_PTR(-EINVAL);
1838 				}
1839 
1840 				found = hist_field;
1841 			}
1842 		}
1843 	}
1844 	return found;
1845 }
1846 
1847 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1848 					 char *system,
1849 					 char *event_name,
1850 					 char *var_name)
1851 {
1852 	struct trace_array *tr = hist_data->event_file->tr;
1853 	struct hist_field *hist_field = NULL;
1854 	struct trace_event_file *file;
1855 
1856 	if (!system || !event_name) {
1857 		hist_field = find_match_var(hist_data, var_name);
1858 		if (IS_ERR(hist_field))
1859 			return NULL;
1860 		if (hist_field)
1861 			return hist_field;
1862 	}
1863 
1864 	file = find_var_file(tr, system, event_name, var_name);
1865 	if (!file)
1866 		return NULL;
1867 
1868 	hist_field = find_file_var(file, var_name);
1869 
1870 	return hist_field;
1871 }
1872 
1873 static u64 hist_field_var_ref(struct hist_field *hist_field,
1874 			      struct tracing_map_elt *elt,
1875 			      struct ring_buffer_event *rbe,
1876 			      void *event)
1877 {
1878 	struct hist_elt_data *elt_data;
1879 	u64 var_val = 0;
1880 
1881 	if (WARN_ON_ONCE(!elt))
1882 		return var_val;
1883 
1884 	elt_data = elt->private_data;
1885 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1886 
1887 	return var_val;
1888 }
1889 
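/*
 * Resolve the variable references attached to this hist trigger: for each
 * entry in var_refs[], look up the owning trigger's tracing_map element
 * for the given key, check that the variable has been set, and copy its
 * current value into var_ref_vals[].  The 'self' argument selects whether
 * references to this trigger's own variables (true) or to other triggers'
 * variables (false) are resolved.  Returns false if any reference in the
 * selected set can't be resolved.
 */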
1890 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1891 			     u64 *var_ref_vals, bool self)
1892 {
1893 	struct hist_trigger_data *var_data;
1894 	struct tracing_map_elt *var_elt;
1895 	struct hist_field *hist_field;
1896 	unsigned int i, var_idx;
1897 	bool resolved = true;
1898 	u64 var_val = 0;
1899 
1900 	for (i = 0; i < hist_data->n_var_refs; i++) {
1901 		hist_field = hist_data->var_refs[i];
1902 		var_idx = hist_field->var.idx;
1903 		var_data = hist_field->var.hist_data;
1904 
1905 		if (var_data == NULL) {
1906 			resolved = false;
1907 			break;
1908 		}
1909 
1910 		if ((self && var_data != hist_data) ||
1911 		    (!self && var_data == hist_data))
1912 			continue;
1913 
1914 		var_elt = tracing_map_lookup(var_data->map, key);
1915 		if (!var_elt) {
1916 			resolved = false;
1917 			break;
1918 		}
1919 
1920 		if (!tracing_map_var_set(var_elt, var_idx)) {
1921 			resolved = false;
1922 			break;
1923 		}
1924 
1925 		if (self || !hist_field->read_once)
1926 			var_val = tracing_map_read_var(var_elt, var_idx);
1927 		else
1928 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1929 
1930 		var_ref_vals[i] = var_val;
1931 	}
1932 
1933 	return resolved;
1934 }
1935 
1936 static const char *hist_field_name(struct hist_field *field,
1937 				   unsigned int level)
1938 {
1939 	const char *field_name = "";
1940 
1941 	if (level > 1)
1942 		return field_name;
1943 
1944 	if (field->field)
1945 		field_name = field->field->name;
1946 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1947 		 field->flags & HIST_FIELD_FL_ALIAS)
1948 		field_name = hist_field_name(field->operands[0], ++level);
1949 	else if (field->flags & HIST_FIELD_FL_CPU)
1950 		field_name = "cpu";
1951 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1952 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1953 		if (field->system) {
1954 			static char full_name[MAX_FILTER_STR_VAL];
1955 
1956 			/* full_name is static: rebuild it rather than append to it */
1957 			snprintf(full_name, sizeof(full_name), "%s.%s.%s",
1958 				 field->system, field->event_name, field->name);
1961 			field_name = full_name;
1962 		} else
1963 			field_name = field->name;
1964 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1965 		field_name = "common_timestamp";
1966 
1967 	if (field_name == NULL)
1968 		field_name = "";
1969 
1970 	return field_name;
1971 }
1972 
1973 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1974 {
1975 	hist_field_fn_t fn = NULL;
1976 
1977 	switch (field_size) {
1978 	case 8:
1979 		if (field_is_signed)
1980 			fn = hist_field_s64;
1981 		else
1982 			fn = hist_field_u64;
1983 		break;
1984 	case 4:
1985 		if (field_is_signed)
1986 			fn = hist_field_s32;
1987 		else
1988 			fn = hist_field_u32;
1989 		break;
1990 	case 2:
1991 		if (field_is_signed)
1992 			fn = hist_field_s16;
1993 		else
1994 			fn = hist_field_u16;
1995 		break;
1996 	case 1:
1997 		if (field_is_signed)
1998 			fn = hist_field_s8;
1999 		else
2000 			fn = hist_field_u8;
2001 		break;
2002 	}
2003 
2004 	return fn;
2005 }
2006 
2007 static int parse_map_size(char *str)
2008 {
2009 	unsigned long size, map_bits;
2010 	int ret;
2011 
2012 	strsep(&str, "=");
2013 	if (!str) {
2014 		ret = -EINVAL;
2015 		goto out;
2016 	}
2017 
2018 	ret = kstrtoul(str, 0, &size);
2019 	if (ret)
2020 		goto out;
2021 
2022 	map_bits = ilog2(roundup_pow_of_two(size));
2023 	if (map_bits < TRACING_MAP_BITS_MIN ||
2024 	    map_bits > TRACING_MAP_BITS_MAX)
2025 		ret = -EINVAL;
2026 	else
2027 		ret = map_bits;
2028  out:
2029 	return ret;
2030 }
2031 
2032 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2033 {
2034 	unsigned int i;
2035 
2036 	if (!attrs)
2037 		return;
2038 
2039 	for (i = 0; i < attrs->n_assignments; i++)
2040 		kfree(attrs->assignment_str[i]);
2041 
2042 	for (i = 0; i < attrs->n_actions; i++)
2043 		kfree(attrs->action_str[i]);
2044 
2045 	kfree(attrs->name);
2046 	kfree(attrs->sort_key_str);
2047 	kfree(attrs->keys_str);
2048 	kfree(attrs->vals_str);
2049 	kfree(attrs->clock);
2050 	kfree(attrs);
2051 }
2052 
2053 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2054 {
2055 	int ret = -EINVAL;
2056 
2057 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
2058 		return ret;
2059 
2060 	if ((str_has_prefix(str, "onmatch(")) ||
2061 	    (str_has_prefix(str, "onmax(")) ||
2062 	    (str_has_prefix(str, "onchange("))) {
2063 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2064 		if (!attrs->action_str[attrs->n_actions]) {
2065 			ret = -ENOMEM;
2066 			return ret;
2067 		}
2068 		attrs->n_actions++;
2069 		ret = 0;
2070 	}
2071 	return ret;
2072 }
2073 
2074 static int parse_assignment(struct trace_array *tr,
2075 			    char *str, struct hist_trigger_attrs *attrs)
2076 {
2077 	int ret = 0;
2078 
2079 	if ((str_has_prefix(str, "key=")) ||
2080 	    (str_has_prefix(str, "keys="))) {
2081 		attrs->keys_str = kstrdup(str, GFP_KERNEL);
2082 		if (!attrs->keys_str) {
2083 			ret = -ENOMEM;
2084 			goto out;
2085 		}
2086 	} else if ((str_has_prefix(str, "val=")) ||
2087 		   (str_has_prefix(str, "vals=")) ||
2088 		   (str_has_prefix(str, "values="))) {
2089 		attrs->vals_str = kstrdup(str, GFP_KERNEL);
2090 		if (!attrs->vals_str) {
2091 			ret = -ENOMEM;
2092 			goto out;
2093 		}
2094 	} else if (str_has_prefix(str, "sort=")) {
2095 		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
2096 		if (!attrs->sort_key_str) {
2097 			ret = -ENOMEM;
2098 			goto out;
2099 		}
2100 	} else if (str_has_prefix(str, "name=")) {
2101 		attrs->name = kstrdup(str, GFP_KERNEL);
2102 		if (!attrs->name) {
2103 			ret = -ENOMEM;
2104 			goto out;
2105 		}
2106 	} else if (str_has_prefix(str, "clock=")) {
2107 		strsep(&str, "=");
2108 		if (!str) {
2109 			ret = -EINVAL;
2110 			goto out;
2111 		}
2112 
2113 		str = strstrip(str);
2114 		attrs->clock = kstrdup(str, GFP_KERNEL);
2115 		if (!attrs->clock) {
2116 			ret = -ENOMEM;
2117 			goto out;
2118 		}
2119 	} else if (str_has_prefix(str, "size=")) {
2120 		int map_bits = parse_map_size(str);
2121 
2122 		if (map_bits < 0) {
2123 			ret = map_bits;
2124 			goto out;
2125 		}
2126 		attrs->map_bits = map_bits;
2127 	} else {
2128 		char *assignment;
2129 
2130 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2131 			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2132 			ret = -EINVAL;
2133 			goto out;
2134 		}
2135 
2136 		assignment = kstrdup(str, GFP_KERNEL);
2137 		if (!assignment) {
2138 			ret = -ENOMEM;
2139 			goto out;
2140 		}
2141 
2142 		attrs->assignment_str[attrs->n_assignments++] = assignment;
2143 	}
2144  out:
2145 	return ret;
2146 }
2147 
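/*
 * parse_hist_trigger_attrs() splits the trigger string on ':' and
 * routes each token: tokens containing '=' go to parse_assignment(),
 * "pause"/"cont"/"clear" set the corresponding flags, and anything
 * else is tried as an action by parse_action().  As an illustration
 * (event and field names are borrowed loosely from
 * Documentation/trace/histogram.rst, not mandated by this code), the
 * trigger string
 *
 *   "keys=call_site:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048"
 *
 * ends up with keys_str, vals_str and sort_key_str holding the first
 * three assignments and map_bits set to 11 via parse_map_size().  A
 * trigger without a "keys="/"key=" assignment is rejected, and a
 * missing "clock=" defaults to "global".
 */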
2148 static struct hist_trigger_attrs *
2149 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2150 {
2151 	struct hist_trigger_attrs *attrs;
2152 	int ret = 0;
2153 
2154 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2155 	if (!attrs)
2156 		return ERR_PTR(-ENOMEM);
2157 
2158 	while (trigger_str) {
2159 		char *str = strsep(&trigger_str, ":");
2160 
2161 		if (strchr(str, '=')) {
2162 			ret = parse_assignment(tr, str, attrs);
2163 			if (ret)
2164 				goto free;
2165 		} else if (strcmp(str, "pause") == 0)
2166 			attrs->pause = true;
2167 		else if ((strcmp(str, "cont") == 0) ||
2168 			 (strcmp(str, "continue") == 0))
2169 			attrs->cont = true;
2170 		else if (strcmp(str, "clear") == 0)
2171 			attrs->clear = true;
2172 		else {
2173 			ret = parse_action(str, attrs);
2174 			if (ret)
2175 				goto free;
2176 		}
2177 	}
2178 
2179 	if (!attrs->keys_str) {
2180 		ret = -EINVAL;
2181 		goto free;
2182 	}
2183 
2184 	if (!attrs->clock) {
2185 		attrs->clock = kstrdup("global", GFP_KERNEL);
2186 		if (!attrs->clock) {
2187 			ret = -ENOMEM;
2188 			goto free;
2189 		}
2190 	}
2191 
2192 	return attrs;
2193  free:
2194 	destroy_hist_trigger_attrs(attrs);
2195 
2196 	return ERR_PTR(ret);
2197 }
2198 
2199 static inline void save_comm(char *comm, struct task_struct *task)
2200 {
2201 	if (!task->pid) {
2202 		strcpy(comm, "<idle>");
2203 		return;
2204 	}
2205 
2206 	if (WARN_ON_ONCE(task->pid < 0)) {
2207 		strcpy(comm, "<XXX>");
2208 		return;
2209 	}
2210 
2211 	strncpy(comm, task->comm, TASK_COMM_LEN);
2212 }
2213 
2214 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2215 {
2216 	unsigned int i;
2217 
2218 	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2219 		kfree(elt_data->field_var_str[i]);
2220 
2221 	kfree(elt_data->comm);
2222 	kfree(elt_data);
2223 }
2224 
2225 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2226 {
2227 	struct hist_elt_data *elt_data = elt->private_data;
2228 
2229 	hist_elt_data_free(elt_data);
2230 }
2231 
2232 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2233 {
2234 	struct hist_trigger_data *hist_data = elt->map->private_data;
2235 	unsigned int size = TASK_COMM_LEN;
2236 	struct hist_elt_data *elt_data;
2237 	struct hist_field *key_field;
2238 	unsigned int i, n_str;
2239 
2240 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2241 	if (!elt_data)
2242 		return -ENOMEM;
2243 
2244 	for_each_hist_key_field(i, hist_data) {
2245 		key_field = hist_data->fields[i];
2246 
2247 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2248 			elt_data->comm = kzalloc(size, GFP_KERNEL);
2249 			if (!elt_data->comm) {
2250 				kfree(elt_data);
2251 				return -ENOMEM;
2252 			}
2253 			break;
2254 		}
2255 	}
2256 
2257 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2258 
2259 	size = STR_VAR_LEN_MAX;
2260 
2261 	for (i = 0; i < n_str; i++) {
2262 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2263 		if (!elt_data->field_var_str[i]) {
2264 			hist_elt_data_free(elt_data);
2265 			return -ENOMEM;
2266 		}
2267 	}
2268 
2269 	elt->private_data = elt_data;
2270 
2271 	return 0;
2272 }
2273 
2274 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2275 {
2276 	struct hist_elt_data *elt_data = elt->private_data;
2277 
2278 	if (elt_data->comm)
2279 		save_comm(elt_data->comm, current);
2280 }
2281 
2282 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2283 	.elt_alloc	= hist_trigger_elt_data_alloc,
2284 	.elt_free	= hist_trigger_elt_data_free,
2285 	.elt_init	= hist_trigger_elt_data_init,
2286 };
2287 
2288 static const char *get_hist_field_flags(struct hist_field *hist_field)
2289 {
2290 	const char *flags_str = NULL;
2291 
2292 	if (hist_field->flags & HIST_FIELD_FL_HEX)
2293 		flags_str = "hex";
2294 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
2295 		flags_str = "sym";
2296 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2297 		flags_str = "sym-offset";
2298 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2299 		flags_str = "execname";
2300 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2301 		flags_str = "syscall";
2302 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2303 		flags_str = "log2";
2304 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2305 		flags_str = "usecs";
2306 
2307 	return flags_str;
2308 }
2309 
2310 static void expr_field_str(struct hist_field *field, char *expr)
2311 {
2312 	if (field->flags & HIST_FIELD_FL_VAR_REF)
2313 		strcat(expr, "$");
2314 
2315 	strcat(expr, hist_field_name(field, 0));
2316 
2317 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2318 		const char *flags_str = get_hist_field_flags(field);
2319 
2320 		if (flags_str) {
2321 			strcat(expr, ".");
2322 			strcat(expr, flags_str);
2323 		}
2324 	}
2325 }
2326 
2327 static char *expr_str(struct hist_field *field, unsigned int level)
2328 {
2329 	char *expr;
2330 
2331 	if (level > 1)
2332 		return NULL;
2333 
2334 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2335 	if (!expr)
2336 		return NULL;
2337 
2338 	if (!field->operands[0]) {
2339 		expr_field_str(field, expr);
2340 		return expr;
2341 	}
2342 
2343 	if (field->operator == FIELD_OP_UNARY_MINUS) {
2344 		char *subexpr;
2345 
2346 		strcat(expr, "-(");
2347 		subexpr = expr_str(field->operands[0], ++level);
2348 		if (!subexpr) {
2349 			kfree(expr);
2350 			return NULL;
2351 		}
2352 		strcat(expr, subexpr);
2353 		strcat(expr, ")");
2354 
2355 		kfree(subexpr);
2356 
2357 		return expr;
2358 	}
2359 
2360 	expr_field_str(field->operands[0], expr);
2361 
2362 	switch (field->operator) {
2363 	case FIELD_OP_MINUS:
2364 		strcat(expr, "-");
2365 		break;
2366 	case FIELD_OP_PLUS:
2367 		strcat(expr, "+");
2368 		break;
2369 	default:
2370 		kfree(expr);
2371 		return NULL;
2372 	}
2373 
2374 	expr_field_str(field->operands[1], expr);
2375 
2376 	return expr;
2377 }
2378 
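/*
 * Note: contains_operator() only reports the kind of the first '+' or
 * '-' found by strpbrk(), and '-' is treated as unary minus only when
 * it is the very first character of the string.  There is no operator
 * precedence; parse_expr() splits off the first atom and re-parses
 * the remainder, so "a-b+c" is effectively evaluated as "a-(b+c)".
 */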
2379 static int contains_operator(char *str)
2380 {
2381 	enum field_op_id field_op = FIELD_OP_NONE;
2382 	char *op;
2383 
2384 	op = strpbrk(str, "+-");
2385 	if (!op)
2386 		return FIELD_OP_NONE;
2387 
2388 	switch (*op) {
2389 	case '-':
2390 		if (*str == '-')
2391 			field_op = FIELD_OP_UNARY_MINUS;
2392 		else
2393 			field_op = FIELD_OP_MINUS;
2394 		break;
2395 	case '+':
2396 		field_op = FIELD_OP_PLUS;
2397 		break;
2398 	default:
2399 		break;
2400 	}
2401 
2402 	return field_op;
2403 }
2404 
2405 static void __destroy_hist_field(struct hist_field *hist_field)
2406 {
2407 	kfree(hist_field->var.name);
2408 	kfree(hist_field->name);
2409 	kfree(hist_field->type);
2410 
2411 	kfree(hist_field);
2412 }
2413 
2414 static void destroy_hist_field(struct hist_field *hist_field,
2415 			       unsigned int level)
2416 {
2417 	unsigned int i;
2418 
2419 	if (level > 3)
2420 		return;
2421 
2422 	if (!hist_field)
2423 		return;
2424 
2425 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2426 		return; /* var refs will be destroyed separately */
2427 
2428 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2429 		destroy_hist_field(hist_field->operands[i], level + 1);
2430 
2431 	__destroy_hist_field(hist_field);
2432 }
2433 
2434 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2435 					    struct ftrace_event_field *field,
2436 					    unsigned long flags,
2437 					    char *var_name)
2438 {
2439 	struct hist_field *hist_field;
2440 
2441 	if (field && is_function_field(field))
2442 		return NULL;
2443 
2444 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2445 	if (!hist_field)
2446 		return NULL;
2447 
2448 	hist_field->hist_data = hist_data;
2449 
2450 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2451 		goto out; /* caller will populate */
2452 
2453 	if (flags & HIST_FIELD_FL_VAR_REF) {
2454 		hist_field->fn = hist_field_var_ref;
2455 		goto out;
2456 	}
2457 
2458 	if (flags & HIST_FIELD_FL_HITCOUNT) {
2459 		hist_field->fn = hist_field_counter;
2460 		hist_field->size = sizeof(u64);
2461 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2462 		if (!hist_field->type)
2463 			goto free;
2464 		goto out;
2465 	}
2466 
2467 	if (flags & HIST_FIELD_FL_STACKTRACE) {
2468 		hist_field->fn = hist_field_none;
2469 		goto out;
2470 	}
2471 
2472 	if (flags & HIST_FIELD_FL_LOG2) {
2473 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2474 		hist_field->fn = hist_field_log2;
2475 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2476 		hist_field->size = hist_field->operands[0]->size;
2477 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2478 		if (!hist_field->type)
2479 			goto free;
2480 		goto out;
2481 	}
2482 
2483 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2484 		hist_field->fn = hist_field_timestamp;
2485 		hist_field->size = sizeof(u64);
2486 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2487 		if (!hist_field->type)
2488 			goto free;
2489 		goto out;
2490 	}
2491 
2492 	if (flags & HIST_FIELD_FL_CPU) {
2493 		hist_field->fn = hist_field_cpu;
2494 		hist_field->size = sizeof(int);
2495 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2496 		if (!hist_field->type)
2497 			goto free;
2498 		goto out;
2499 	}
2500 
2501 	if (WARN_ON_ONCE(!field))
2502 		goto out;
2503 
2504 	if (is_string_field(field)) {
2505 		flags |= HIST_FIELD_FL_STRING;
2506 
2507 		hist_field->size = MAX_FILTER_STR_VAL;
2508 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2509 		if (!hist_field->type)
2510 			goto free;
2511 
2512 		if (field->filter_type == FILTER_STATIC_STRING)
2513 			hist_field->fn = hist_field_string;
2514 		else if (field->filter_type == FILTER_DYN_STRING)
2515 			hist_field->fn = hist_field_dynstring;
2516 		else
2517 			hist_field->fn = hist_field_pstring;
2518 	} else {
2519 		hist_field->size = field->size;
2520 		hist_field->is_signed = field->is_signed;
2521 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2522 		if (!hist_field->type)
2523 			goto free;
2524 
2525 		hist_field->fn = select_value_fn(field->size,
2526 						 field->is_signed);
2527 		if (!hist_field->fn) {
2528 			destroy_hist_field(hist_field, 0);
2529 			return NULL;
2530 		}
2531 	}
2532  out:
2533 	hist_field->field = field;
2534 	hist_field->flags = flags;
2535 
2536 	if (var_name) {
2537 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2538 		if (!hist_field->var.name)
2539 			goto free;
2540 	}
2541 
2542 	return hist_field;
2543  free:
2544 	destroy_hist_field(hist_field, 0);
2545 	return NULL;
2546 }
2547 
2548 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2549 {
2550 	unsigned int i;
2551 
2552 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2553 		if (hist_data->fields[i]) {
2554 			destroy_hist_field(hist_data->fields[i], 0);
2555 			hist_data->fields[i] = NULL;
2556 		}
2557 	}
2558 
2559 	for (i = 0; i < hist_data->n_var_refs; i++) {
2560 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2561 		__destroy_hist_field(hist_data->var_refs[i]);
2562 		hist_data->var_refs[i] = NULL;
2563 	}
2564 }
2565 
2566 static int init_var_ref(struct hist_field *ref_field,
2567 			struct hist_field *var_field,
2568 			char *system, char *event_name)
2569 {
2570 	int err = 0;
2571 
2572 	ref_field->var.idx = var_field->var.idx;
2573 	ref_field->var.hist_data = var_field->hist_data;
2574 	ref_field->size = var_field->size;
2575 	ref_field->is_signed = var_field->is_signed;
2576 	ref_field->flags |= var_field->flags &
2577 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2578 
2579 	if (system) {
2580 		ref_field->system = kstrdup(system, GFP_KERNEL);
2581 		if (!ref_field->system)
2582 			return -ENOMEM;
2583 	}
2584 
2585 	if (event_name) {
2586 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2587 		if (!ref_field->event_name) {
2588 			err = -ENOMEM;
2589 			goto free;
2590 		}
2591 	}
2592 
2593 	if (var_field->var.name) {
2594 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2595 		if (!ref_field->name) {
2596 			err = -ENOMEM;
2597 			goto free;
2598 		}
2599 	} else if (var_field->name) {
2600 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2601 		if (!ref_field->name) {
2602 			err = -ENOMEM;
2603 			goto free;
2604 		}
2605 	}
2606 
2607 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2608 	if (!ref_field->type) {
2609 		err = -ENOMEM;
2610 		goto free;
2611 	}
2612  out:
2613 	return err;
2614  free:
2615 	kfree(ref_field->system);
2616 	kfree(ref_field->event_name);
2617 	kfree(ref_field->name);
2618 
2619 	goto out;
2620 }
2621 
2622 /**
2623  * create_var_ref - Create a variable reference and attach it to trigger
2624  * @hist_data: The trigger that will be referencing the variable
2625  * @var_field: The VAR field to create a reference to
2626  * @system: The optional system string
2627  * @event_name: The optional event_name string
2628  *
2629  * Given a variable hist_field, create a VAR_REF hist_field that
2630  * represents a reference to it.
2631  *
2632  * This function also adds the reference to the trigger that
2633  * now references the variable.
2634  *
2635  * Return: The VAR_REF field if successful, NULL if not
2636  */
2637 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2638 					 struct hist_field *var_field,
2639 					 char *system, char *event_name)
2640 {
2641 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2642 	struct hist_field *ref_field;
2643 
2644 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2645 	if (ref_field) {
2646 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2647 			destroy_hist_field(ref_field, 0);
2648 			return NULL;
2649 		}
2650 
2651 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2652 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2653 	}
2654 
2655 	return ref_field;
2656 }
2657 
2658 static bool is_var_ref(char *var_name)
2659 {
2660 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2661 		return false;
2662 
2663 	return true;
2664 }
2665 
2666 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2667 				 char *var_name)
2668 {
2669 	char *name, *field;
2670 	unsigned int i;
2671 
2672 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2673 		name = hist_data->attrs->var_defs.name[i];
2674 
2675 		if (strcmp(var_name, name) == 0) {
2676 			field = hist_data->attrs->var_defs.expr[i];
2677 			if (contains_operator(field) || is_var_ref(field))
2678 				continue;
2679 			return field;
2680 		}
2681 	}
2682 
2683 	return NULL;
2684 }
2685 
2686 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2687 				 char *system, char *event_name,
2688 				 char *var_name)
2689 {
2690 	struct trace_event_call *call;
2691 
2692 	if (system && event_name) {
2693 		call = hist_data->event_file->event_call;
2694 
2695 		if (strcmp(system, call->class->system) != 0)
2696 			return NULL;
2697 
2698 		if (strcmp(event_name, trace_event_name(call)) != 0)
2699 			return NULL;
2700 	}
2701 
2702 	if (!!system != !!event_name)
2703 		return NULL;
2704 
2705 	if (!is_var_ref(var_name))
2706 		return NULL;
2707 
2708 	var_name++;
2709 
2710 	return field_name_from_var(hist_data, var_name);
2711 }
2712 
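/*
 * parse_var_ref() takes a '$'-prefixed variable name, optionally
 * qualified with a system and event name, finds the variable field it
 * refers to via find_event_var(), and wraps it in a VAR_REF field
 * with create_var_ref().  For example, in a trigger assignment such
 * as wakeup_lat=common_timestamp.usecs-$ts0 (names borrowed from the
 * histogram documentation), the "$ts0" operand is resolved here.
 */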
2713 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2714 					char *system, char *event_name,
2715 					char *var_name)
2716 {
2717 	struct hist_field *var_field = NULL, *ref_field = NULL;
2718 	struct trace_array *tr = hist_data->event_file->tr;
2719 
2720 	if (!is_var_ref(var_name))
2721 		return NULL;
2722 
2723 	var_name++;
2724 
2725 	var_field = find_event_var(hist_data, system, event_name, var_name);
2726 	if (var_field)
2727 		ref_field = create_var_ref(hist_data, var_field,
2728 					   system, event_name);
2729 
2730 	if (!ref_field)
2731 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2732 
2733 	return ref_field;
2734 }
2735 
2736 static struct ftrace_event_field *
2737 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2738 	    char *field_str, unsigned long *flags)
2739 {
2740 	struct ftrace_event_field *field = NULL;
2741 	char *field_name, *modifier, *str;
2742 	struct trace_array *tr = file->tr;
2743 
2744 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2745 	if (!modifier)
2746 		return ERR_PTR(-ENOMEM);
2747 
2748 	field_name = strsep(&modifier, ".");
2749 	if (modifier) {
2750 		if (strcmp(modifier, "hex") == 0)
2751 			*flags |= HIST_FIELD_FL_HEX;
2752 		else if (strcmp(modifier, "sym") == 0)
2753 			*flags |= HIST_FIELD_FL_SYM;
2754 		else if (strcmp(modifier, "sym-offset") == 0)
2755 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2756 		else if ((strcmp(modifier, "execname") == 0) &&
2757 			 (strcmp(field_name, "common_pid") == 0))
2758 			*flags |= HIST_FIELD_FL_EXECNAME;
2759 		else if (strcmp(modifier, "syscall") == 0)
2760 			*flags |= HIST_FIELD_FL_SYSCALL;
2761 		else if (strcmp(modifier, "log2") == 0)
2762 			*flags |= HIST_FIELD_FL_LOG2;
2763 		else if (strcmp(modifier, "usecs") == 0)
2764 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2765 		else {
2766 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2767 			field = ERR_PTR(-EINVAL);
2768 			goto out;
2769 		}
2770 	}
2771 
2772 	if (strcmp(field_name, "common_timestamp") == 0) {
2773 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2774 		hist_data->enable_timestamps = true;
2775 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2776 			hist_data->attrs->ts_in_usecs = true;
2777 	} else if (strcmp(field_name, "cpu") == 0)
2778 		*flags |= HIST_FIELD_FL_CPU;
2779 	else {
2780 		field = trace_find_event_field(file->event_call, field_name);
2781 		if (!field || !field->size) {
2782 			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2783 			field = ERR_PTR(-EINVAL);
2784 			goto out;
2785 		}
2786 	}
2787  out:
2788 	kfree(str);
2789 
2790 	return field;
2791 }
2792 
2793 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2794 				       struct hist_field *var_ref,
2795 				       char *var_name)
2796 {
2797 	struct hist_field *alias = NULL;
2798 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2799 
2800 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2801 	if (!alias)
2802 		return NULL;
2803 
2804 	alias->fn = var_ref->fn;
2805 	alias->operands[0] = var_ref;
2806 
2807 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2808 		destroy_hist_field(alias, 0);
2809 		return NULL;
2810 	}
2811 
2812 	alias->var_ref_idx = var_ref->var_ref_idx;
2813 
2814 	return alias;
2815 }
2816 
2817 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2818 				     struct trace_event_file *file, char *str,
2819 				     unsigned long *flags, char *var_name)
2820 {
2821 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2822 	struct ftrace_event_field *field = NULL;
2823 	struct hist_field *hist_field = NULL;
2824 	int ret = 0;
2825 
2826 	s = strchr(str, '.');
2827 	if (s) {
2828 		s = strchr(++s, '.');
2829 		if (s) {
2830 			ref_system = strsep(&str, ".");
2831 			if (!str) {
2832 				ret = -EINVAL;
2833 				goto out;
2834 			}
2835 			ref_event = strsep(&str, ".");
2836 			if (!str) {
2837 				ret = -EINVAL;
2838 				goto out;
2839 			}
2840 			ref_var = str;
2841 		}
2842 	}
2843 
2844 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2845 	if (!s) {
2846 		hist_field = parse_var_ref(hist_data, ref_system,
2847 					   ref_event, ref_var);
2848 		if (hist_field) {
2849 			if (var_name) {
2850 				hist_field = create_alias(hist_data, hist_field, var_name);
2851 				if (!hist_field) {
2852 					ret = -ENOMEM;
2853 					goto out;
2854 				}
2855 			}
2856 			return hist_field;
2857 		}
2858 	} else
2859 		str = s;
2860 
2861 	field = parse_field(hist_data, file, str, flags);
2862 	if (IS_ERR(field)) {
2863 		ret = PTR_ERR(field);
2864 		goto out;
2865 	}
2866 
2867 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2868 	if (!hist_field) {
2869 		ret = -ENOMEM;
2870 		goto out;
2871 	}
2872 
2873 	return hist_field;
2874  out:
2875 	return ERR_PTR(ret);
2876 }
2877 
2878 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2879 				     struct trace_event_file *file,
2880 				     char *str, unsigned long flags,
2881 				     char *var_name, unsigned int level);
2882 
2883 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2884 				      struct trace_event_file *file,
2885 				      char *str, unsigned long flags,
2886 				      char *var_name, unsigned int level)
2887 {
2888 	struct hist_field *operand1, *expr = NULL;
2889 	unsigned long operand_flags;
2890 	int ret = 0;
2891 	char *s;
2892 
2893 	/* we support only -(xxx) i.e. explicit parens required */
2894 
2895 	if (level > 3) {
2896 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2897 		ret = -EINVAL;
2898 		goto free;
2899 	}
2900 
2901 	str++; /* skip leading '-' */
2902 
2903 	s = strchr(str, '(');
2904 	if (s)
2905 		str++;
2906 	else {
2907 		ret = -EINVAL;
2908 		goto free;
2909 	}
2910 
2911 	s = strrchr(str, ')');
2912 	if (s)
2913 		*s = '\0';
2914 	else {
2915 		ret = -EINVAL; /* no closing ')' */
2916 		goto free;
2917 	}
2918 
2919 	flags |= HIST_FIELD_FL_EXPR;
2920 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2921 	if (!expr) {
2922 		ret = -ENOMEM;
2923 		goto free;
2924 	}
2925 
2926 	operand_flags = 0;
2927 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2928 	if (IS_ERR(operand1)) {
2929 		ret = PTR_ERR(operand1);
2930 		goto free;
2931 	}
2932 
2933 	expr->flags |= operand1->flags &
2934 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2935 	expr->fn = hist_field_unary_minus;
2936 	expr->operands[0] = operand1;
2937 	expr->operator = FIELD_OP_UNARY_MINUS;
2938 	expr->name = expr_str(expr, 0);
2939 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2940 	if (!expr->type) {
2941 		ret = -ENOMEM;
2942 		goto free;
2943 	}
2944 
2945 	return expr;
2946  free:
2947 	destroy_hist_field(expr, 0);
2948 	return ERR_PTR(ret);
2949 }
2950 
2951 static int check_expr_operands(struct trace_array *tr,
2952 			       struct hist_field *operand1,
2953 			       struct hist_field *operand2)
2954 {
2955 	unsigned long operand1_flags = operand1->flags;
2956 	unsigned long operand2_flags = operand2->flags;
2957 
2958 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2959 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2960 		struct hist_field *var;
2961 
2962 		var = find_var_field(operand1->var.hist_data, operand1->name);
2963 		if (!var)
2964 			return -EINVAL;
2965 		operand1_flags = var->flags;
2966 	}
2967 
2968 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2969 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2970 		struct hist_field *var;
2971 
2972 		var = find_var_field(operand2->var.hist_data, operand2->name);
2973 		if (!var)
2974 			return -EINVAL;
2975 		operand2_flags = var->flags;
2976 	}
2977 
2978 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2979 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2980 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2981 		return -EINVAL;
2982 	}
2983 
2984 	return 0;
2985 }
2986 
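/*
 * parse_expr() handles at most a two-operand '+'/'-' expression per
 * level: the string is split at the first operator, the left side is
 * parsed as an atom, and the right side is recursively parsed as
 * another expression, with nesting capped at three levels
 * (HIST_ERR_TOO_MANY_SUBEXPR).  Unary minus is only accepted in the
 * explicit "-(...)" form handled by parse_unary().  A sketch of a
 * typical expression (field and variable names are illustrative):
 *
 *   "common_timestamp.usecs-$ts0"
 *     operand1 = common_timestamp.usecs    (parse_atom())
 *     operand2 = $ts0                      (parse_expr() -> parse_atom())
 *     expr->fn = hist_field_minus, expr->operator = FIELD_OP_MINUS
 *
 * check_expr_operands() then rejects mixing usecs and non-usecs
 * timestamps between the two operands.
 */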
2987 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2988 				     struct trace_event_file *file,
2989 				     char *str, unsigned long flags,
2990 				     char *var_name, unsigned int level)
2991 {
2992 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2993 	unsigned long operand_flags;
2994 	int field_op, ret = -EINVAL;
2995 	char *sep, *operand1_str;
2996 
2997 	if (level > 3) {
2998 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2999 		return ERR_PTR(-EINVAL);
3000 	}
3001 
3002 	field_op = contains_operator(str);
3003 
3004 	if (field_op == FIELD_OP_NONE)
3005 		return parse_atom(hist_data, file, str, &flags, var_name);
3006 
3007 	if (field_op == FIELD_OP_UNARY_MINUS)
3008 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
3009 
3010 	switch (field_op) {
3011 	case FIELD_OP_MINUS:
3012 		sep = "-";
3013 		break;
3014 	case FIELD_OP_PLUS:
3015 		sep = "+";
3016 		break;
3017 	default:
3018 		goto free;
3019 	}
3020 
3021 	operand1_str = strsep(&str, sep);
3022 	if (!operand1_str || !str)
3023 		goto free;
3024 
3025 	operand_flags = 0;
3026 	operand1 = parse_atom(hist_data, file, operand1_str,
3027 			      &operand_flags, NULL);
3028 	if (IS_ERR(operand1)) {
3029 		ret = PTR_ERR(operand1);
3030 		operand1 = NULL;
3031 		goto free;
3032 	}
3033 
3034 	/* rest of string could be another expression e.g. b+c in a+b+c */
3035 	operand_flags = 0;
3036 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3037 	if (IS_ERR(operand2)) {
3038 		ret = PTR_ERR(operand2);
3039 		operand2 = NULL;
3040 		goto free;
3041 	}
3042 
3043 	ret = check_expr_operands(file->tr, operand1, operand2);
3044 	if (ret)
3045 		goto free;
3046 
3047 	flags |= HIST_FIELD_FL_EXPR;
3048 
3049 	flags |= operand1->flags &
3050 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3051 
3052 	expr = create_hist_field(hist_data, NULL, flags, var_name);
3053 	if (!expr) {
3054 		ret = -ENOMEM;
3055 		goto free;
3056 	}
3057 
3058 	operand1->read_once = true;
3059 	operand2->read_once = true;
3060 
3061 	expr->operands[0] = operand1;
3062 	expr->operands[1] = operand2;
3063 	expr->operator = field_op;
3064 	expr->name = expr_str(expr, 0);
3065 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
3066 	if (!expr->type) {
3067 		ret = -ENOMEM;
3068 		goto free;
3069 	}
3070 
3071 	switch (field_op) {
3072 	case FIELD_OP_MINUS:
3073 		expr->fn = hist_field_minus;
3074 		break;
3075 	case FIELD_OP_PLUS:
3076 		expr->fn = hist_field_plus;
3077 		break;
3078 	default:
3079 		ret = -EINVAL;
3080 		goto free;
3081 	}
3082 
3083 	return expr;
3084  free:
	/* once attached to expr, the operands are freed along with it */
	if (!expr) {
3085 		destroy_hist_field(operand1, 0);
3086 		destroy_hist_field(operand2, 0);
	}
3087 	destroy_hist_field(expr, 0);
3088 
3089 	return ERR_PTR(ret);
3090 }
3091 
3092 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3093 				 struct trace_event_file *file)
3094 {
3095 	struct event_trigger_data *test;
3096 
3097 	list_for_each_entry_rcu(test, &file->triggers, list) {
3098 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3099 			if (test->private_data == hist_data)
3100 				return test->filter_str;
3101 		}
3102 	}
3103 
3104 	return NULL;
3105 }
3106 
3107 static struct event_command trigger_hist_cmd;
3108 static int event_hist_trigger_func(struct event_command *cmd_ops,
3109 				   struct trace_event_file *file,
3110 				   char *glob, char *cmd, char *param);
3111 
3112 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3113 			    struct hist_trigger_data *hist_data,
3114 			    unsigned int n_keys)
3115 {
3116 	struct hist_field *target_hist_field, *hist_field;
3117 	unsigned int n, i, j;
3118 
3119 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
3120 		return false;
3121 
3122 	i = hist_data->n_vals;
3123 	j = target_hist_data->n_vals;
3124 
3125 	for (n = 0; n < n_keys; n++) {
3126 		hist_field = hist_data->fields[i + n];
3127 		target_hist_field = target_hist_data->fields[j + n];
3128 
3129 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
3130 			return false;
3131 		if (hist_field->size != target_hist_field->size)
3132 			return false;
3133 		if (hist_field->is_signed != target_hist_field->is_signed)
3134 			return false;
3135 	}
3136 
3137 	return true;
3138 }
3139 
3140 static struct hist_trigger_data *
3141 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3142 		     struct trace_event_file *file)
3143 {
3144 	struct hist_trigger_data *hist_data;
3145 	struct event_trigger_data *test;
3146 	unsigned int n_keys;
3147 
3148 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3149 
3150 	list_for_each_entry_rcu(test, &file->triggers, list) {
3151 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3152 			hist_data = test->private_data;
3153 
3154 			if (compatible_keys(target_hist_data, hist_data, n_keys))
3155 				return hist_data;
3156 		}
3157 	}
3158 
3159 	return NULL;
3160 }
3161 
3162 static struct trace_event_file *event_file(struct trace_array *tr,
3163 					   char *system, char *event_name)
3164 {
3165 	struct trace_event_file *file;
3166 
3167 	file = __find_event_file(tr, system, event_name);
3168 	if (!file)
3169 		return ERR_PTR(-EINVAL);
3170 
3171 	return file;
3172 }
3173 
3174 static struct hist_field *
3175 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3176 			 char *system, char *event_name, char *field_name)
3177 {
3178 	struct hist_field *event_var;
3179 	char *synthetic_name;
3180 
3181 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3182 	if (!synthetic_name)
3183 		return ERR_PTR(-ENOMEM);
3184 
3185 	strcpy(synthetic_name, "synthetic_");
3186 	strcat(synthetic_name, field_name);
3187 
3188 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3189 
3190 	kfree(synthetic_name);
3191 
3192 	return event_var;
3193 }
3194 
3195 /**
3196  * create_field_var_hist - Automatically create a histogram and var for a field
3197  * @target_hist_data: The target hist trigger
3198  * @subsys_name: Optional subsystem name
3199  * @event_name: Optional event name
3200  * @field_name: The name of the field (and the resulting variable)
3201  *
3202  * Hist trigger actions fetch data from variables, not directly from
3203  * events.  However, for convenience, users are allowed to directly
3204  * specify an event field in an action, which will be automatically
3205  * converted into a variable on their behalf.
3206  *
3207  * If a user specifies a field on an event other than the event the
3208  * histogram is currently being defined on (the target event
3209  * histogram), the only way that can be accomplished is if a new hist
3210  * trigger is created and the field variable defined on that.
3211  *
3212  * This function creates a new histogram compatible with the target
3213  * event (meaning a histogram with the same key as the target
3214  * histogram), and creates a variable for the specified field, but
3215  * with 'synthetic_' prepended to the variable name in order to avoid
3216  * collision with normal field variables.
3217  *
3218  * Return: The variable created for the field.
3219  */
3220 static struct hist_field *
3221 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3222 		      char *subsys_name, char *event_name, char *field_name)
3223 {
3224 	struct trace_array *tr = target_hist_data->event_file->tr;
3225 	struct hist_field *event_var = ERR_PTR(-EINVAL);
3226 	struct hist_trigger_data *hist_data;
3227 	unsigned int i, n, first = true;
3228 	struct field_var_hist *var_hist;
3229 	struct trace_event_file *file;
3230 	struct hist_field *key_field;
3231 	char *saved_filter;
3232 	char *cmd;
3233 	int ret;
3234 
3235 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3236 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3237 		return ERR_PTR(-EINVAL);
3238 	}
3239 
3240 	file = event_file(tr, subsys_name, event_name);
3241 
3242 	if (IS_ERR(file)) {
3243 		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3244 		ret = PTR_ERR(file);
3245 		return ERR_PTR(ret);
3246 	}
3247 
3248 	/*
3249 	 * Look for a histogram compatible with target.  We'll use the
3250 	 * found histogram specification to create a new matching
3251 	 * histogram with our variable on it.  target_hist_data is not
3252 	 * yet a registered histogram so we can't use that.
3253 	 */
3254 	hist_data = find_compatible_hist(target_hist_data, file);
3255 	if (!hist_data) {
3256 		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3257 		return ERR_PTR(-EINVAL);
3258 	}
3259 
3260 	/* See if a synthetic field variable has already been created */
3261 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3262 					     event_name, field_name);
3263 	if (!IS_ERR_OR_NULL(event_var))
3264 		return event_var;
3265 
3266 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3267 	if (!var_hist)
3268 		return ERR_PTR(-ENOMEM);
3269 
3270 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3271 	if (!cmd) {
3272 		kfree(var_hist);
3273 		return ERR_PTR(-ENOMEM);
3274 	}
3275 
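	/*
	 * The command assembled below ends up looking roughly like
	 * (key, field and filter names are illustrative):
	 *
	 *   "keys=pid:synthetic_prio=prio if <filter of compatible hist>"
	 *
	 * and is then registered on the other event via
	 * event_hist_trigger_func(), exactly as if a user had written it.
	 */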
3276 	/* Use the same keys as the compatible histogram */
3277 	strcat(cmd, "keys=");
3278 
3279 	for_each_hist_key_field(i, hist_data) {
3280 		key_field = hist_data->fields[i];
3281 		if (!first)
3282 			strcat(cmd, ",");
3283 		strcat(cmd, key_field->field->name);
3284 		first = false;
3285 	}
3286 
3287 	/* Create the synthetic field variable specification */
3288 	strcat(cmd, ":synthetic_");
3289 	strcat(cmd, field_name);
3290 	strcat(cmd, "=");
3291 	strcat(cmd, field_name);
3292 
3293 	/* Use the same filter as the compatible histogram */
3294 	saved_filter = find_trigger_filter(hist_data, file);
3295 	if (saved_filter) {
3296 		strcat(cmd, " if ");
3297 		strcat(cmd, saved_filter);
3298 	}
3299 
3300 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3301 	if (!var_hist->cmd) {
3302 		kfree(cmd);
3303 		kfree(var_hist);
3304 		return ERR_PTR(-ENOMEM);
3305 	}
3306 
3307 	/* Save the compatible histogram information */
3308 	var_hist->hist_data = hist_data;
3309 
3310 	/* Create the new histogram with our variable */
3311 	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3312 				      "", "hist", cmd);
3313 	if (ret) {
3314 		kfree(cmd);
3315 		kfree(var_hist->cmd);
3316 		kfree(var_hist);
3317 		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3318 		return ERR_PTR(ret);
3319 	}
3320 
3321 	kfree(cmd);
3322 
3323 	/* If we can't find the variable, something went wrong */
3324 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3325 					     event_name, field_name);
3326 	if (IS_ERR_OR_NULL(event_var)) {
3327 		kfree(var_hist->cmd);
3328 		kfree(var_hist);
3329 		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3330 		return ERR_PTR(-EINVAL);
3331 	}
3332 
3333 	n = target_hist_data->n_field_var_hists;
3334 	target_hist_data->field_var_hists[n] = var_hist;
3335 	target_hist_data->n_field_var_hists++;
3336 
3337 	return event_var;
3338 }
3339 
3340 static struct hist_field *
3341 find_target_event_var(struct hist_trigger_data *hist_data,
3342 		      char *subsys_name, char *event_name, char *var_name)
3343 {
3344 	struct trace_event_file *file = hist_data->event_file;
3345 	struct hist_field *hist_field = NULL;
3346 
3347 	if (subsys_name) {
3348 		struct trace_event_call *call;
3349 
3350 		if (!event_name)
3351 			return NULL;
3352 
3353 		call = file->event_call;
3354 
3355 		if (strcmp(subsys_name, call->class->system) != 0)
3356 			return NULL;
3357 
3358 		if (strcmp(event_name, trace_event_name(call)) != 0)
3359 			return NULL;
3360 	}
3361 
3362 	hist_field = find_var_field(hist_data, var_name);
3363 
3364 	return hist_field;
3365 }
3366 
3367 static inline void __update_field_vars(struct tracing_map_elt *elt,
3368 				       struct ring_buffer_event *rbe,
3369 				       void *rec,
3370 				       struct field_var **field_vars,
3371 				       unsigned int n_field_vars,
3372 				       unsigned int field_var_str_start)
3373 {
3374 	struct hist_elt_data *elt_data = elt->private_data;
3375 	unsigned int i, j, var_idx;
3376 	u64 var_val;
3377 
3378 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3379 		struct field_var *field_var = field_vars[i];
3380 		struct hist_field *var = field_var->var;
3381 		struct hist_field *val = field_var->val;
3382 
3383 		var_val = val->fn(val, elt, rbe, rec);
3384 		var_idx = var->var.idx;
3385 
3386 		if (val->flags & HIST_FIELD_FL_STRING) {
3387 			char *str = elt_data->field_var_str[j++];
3388 			char *val_str = (char *)(uintptr_t)var_val;
3389 
3390 			strscpy(str, val_str, STR_VAR_LEN_MAX);
3391 			var_val = (u64)(uintptr_t)str;
3392 		}
3393 		tracing_map_set_var(elt, var_idx, var_val);
3394 	}
3395 }
3396 
3397 static void update_field_vars(struct hist_trigger_data *hist_data,
3398 			      struct tracing_map_elt *elt,
3399 			      struct ring_buffer_event *rbe,
3400 			      void *rec)
3401 {
3402 	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
3403 			    hist_data->n_field_vars, 0);
3404 }
3405 
3406 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3407 				 struct tracing_map_elt *elt, void *rec,
3408 				 struct ring_buffer_event *rbe, void *key,
3409 				 struct action_data *data, u64 *var_ref_vals)
3410 {
3411 	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
3412 			    hist_data->n_save_vars, hist_data->n_field_var_str);
3413 }
3414 
3415 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3416 				     struct trace_event_file *file,
3417 				     char *name, int size, const char *type)
3418 {
3419 	struct hist_field *var;
3420 	int idx;
3421 
3422 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3423 		var = ERR_PTR(-EINVAL);
3424 		goto out;
3425 	}
3426 
3427 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3428 	if (!var) {
3429 		var = ERR_PTR(-ENOMEM);
3430 		goto out;
3431 	}
3432 
3433 	idx = tracing_map_add_var(hist_data->map);
3434 	if (idx < 0) {
3435 		kfree(var);
3436 		var = ERR_PTR(-EINVAL);
3437 		goto out;
3438 	}
3439 
3440 	var->flags = HIST_FIELD_FL_VAR;
3441 	var->var.idx = idx;
3442 	var->var.hist_data = var->hist_data = hist_data;
3443 	var->size = size;
3444 	var->var.name = kstrdup(name, GFP_KERNEL);
3445 	var->type = kstrdup(type, GFP_KERNEL);
3446 	if (!var->var.name || !var->type) {
3447 		kfree(var->var.name);
3448 		kfree(var->type);
3449 		kfree(var);
3450 		var = ERR_PTR(-ENOMEM);
3451 	}
3452  out:
3453 	return var;
3454 }
3455 
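/*
 * create_field_var() pairs up two hist_fields for an event field used
 * in an action: 'val' reads the field's value from the event record
 * (created via parse_atom()), and 'var' is a variable of the same
 * size and type that the value is stashed in (created via
 * create_var()), so actions can later read it back with
 * tracing_map_read_var().
 */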
3456 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3457 					  struct trace_event_file *file,
3458 					  char *field_name)
3459 {
3460 	struct hist_field *val = NULL, *var = NULL;
3461 	unsigned long flags = HIST_FIELD_FL_VAR;
3462 	struct trace_array *tr = file->tr;
3463 	struct field_var *field_var;
3464 	int ret = 0;
3465 
3466 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3467 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3468 		ret = -EINVAL;
3469 		goto err;
3470 	}
3471 
3472 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3473 	if (IS_ERR(val)) {
3474 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3475 		ret = PTR_ERR(val);
3476 		goto err;
3477 	}
3478 
3479 	var = create_var(hist_data, file, field_name, val->size, val->type);
3480 	if (IS_ERR(var)) {
3481 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3482 		kfree(val);
3483 		ret = PTR_ERR(var);
3484 		goto err;
3485 	}
3486 
3487 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3488 	if (!field_var) {
3489 		kfree(val);
3490 		kfree(var);
3491 		ret = -ENOMEM;
3492 		goto err;
3493 	}
3494 
3495 	field_var->var = var;
3496 	field_var->val = val;
3497  out:
3498 	return field_var;
3499  err:
3500 	field_var = ERR_PTR(ret);
3501 	goto out;
3502 }
3503 
3504 /**
3505  * create_target_field_var - Automatically create a variable for a field
3506  * @target_hist_data: The target hist trigger
3507  * @subsys_name: Optional subsystem name
3508  * @event_name: Optional event name
3509  * @var_name: The name of the field (and the resulting variable)
3510  *
3511  * Hist trigger actions fetch data from variables, not directly from
3512  * events.  However, for convenience, users are allowed to directly
3513  * specify an event field in an action, which will be automatically
3514  * converted into a variable on their behalf.
3515  *
3516  * This function creates a field variable with the name var_name on
3517  * the hist trigger currently being defined on the target event.  If
3518  * subsys_name and event_name are specified, this function simply
3519  * verifies that they do in fact match the target event subsystem and
3520  * event name.
3521  *
3522  * Return: The variable created for the field.
3523  */
3524 static struct field_var *
3525 create_target_field_var(struct hist_trigger_data *target_hist_data,
3526 			char *subsys_name, char *event_name, char *var_name)
3527 {
3528 	struct trace_event_file *file = target_hist_data->event_file;
3529 
3530 	if (subsys_name) {
3531 		struct trace_event_call *call;
3532 
3533 		if (!event_name)
3534 			return NULL;
3535 
3536 		call = file->event_call;
3537 
3538 		if (strcmp(subsys_name, call->class->system) != 0)
3539 			return NULL;
3540 
3541 		if (strcmp(event_name, trace_event_name(call)) != 0)
3542 			return NULL;
3543 	}
3544 
3545 	return create_field_var(target_hist_data, file, var_name);
3546 }
3547 
3548 static bool check_track_val_max(u64 track_val, u64 var_val)
3549 {
3550 	if (var_val <= track_val)
3551 		return false;
3552 
3553 	return true;
3554 }
3555 
3556 static bool check_track_val_changed(u64 track_val, u64 var_val)
3557 {
3558 	if (var_val == track_val)
3559 		return false;
3560 
3561 	return true;
3562 }
3563 
3564 static u64 get_track_val(struct hist_trigger_data *hist_data,
3565 			 struct tracing_map_elt *elt,
3566 			 struct action_data *data)
3567 {
3568 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3569 	u64 track_val;
3570 
3571 	track_val = tracing_map_read_var(elt, track_var_idx);
3572 
3573 	return track_val;
3574 }
3575 
3576 static void save_track_val(struct hist_trigger_data *hist_data,
3577 			   struct tracing_map_elt *elt,
3578 			   struct action_data *data, u64 var_val)
3579 {
3580 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3581 
3582 	tracing_map_set_var(elt, track_var_idx, var_val);
3583 }
3584 
3585 static void save_track_data(struct hist_trigger_data *hist_data,
3586 			    struct tracing_map_elt *elt, void *rec,
3587 			    struct ring_buffer_event *rbe, void *key,
3588 			    struct action_data *data, u64 *var_ref_vals)
3589 {
3590 	if (data->track_data.save_data)
3591 		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3592 }
3593 
3594 static bool check_track_val(struct tracing_map_elt *elt,
3595 			    struct action_data *data,
3596 			    u64 var_val)
3597 {
3598 	struct hist_trigger_data *hist_data;
3599 	u64 track_val;
3600 
3601 	hist_data = data->track_data.track_var->hist_data;
3602 	track_val = get_track_val(hist_data, elt, data);
3603 
3604 	return data->track_data.check_val(track_val, var_val);
3605 }
3606 
3607 #ifdef CONFIG_TRACER_SNAPSHOT
3608 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3609 {
3610 	/* called with tr->max_lock held */
3611 	struct track_data *track_data = tr->cond_snapshot->cond_data;
3612 	struct hist_elt_data *elt_data, *track_elt_data;
3613 	struct snapshot_context *context = cond_data;
3614 	struct action_data *action;
3615 	u64 track_val;
3616 
3617 	if (!track_data)
3618 		return false;
3619 
3620 	action = track_data->action_data;
3621 
3622 	track_val = get_track_val(track_data->hist_data, context->elt,
3623 				  track_data->action_data);
3624 
3625 	if (!action->track_data.check_val(track_data->track_val, track_val))
3626 		return false;
3627 
3628 	track_data->track_val = track_val;
3629 	memcpy(track_data->key, context->key, track_data->key_len);
3630 
3631 	elt_data = context->elt->private_data;
3632 	track_elt_data = track_data->elt.private_data;
3633 	if (elt_data->comm)
3634 		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3635 
3636 	track_data->updated = true;
3637 
3638 	return true;
3639 }
3640 
3641 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3642 				     struct tracing_map_elt *elt, void *rec,
3643 				     struct ring_buffer_event *rbe, void *key,
3644 				     struct action_data *data,
3645 				     u64 *var_ref_vals)
3646 {
3647 	struct trace_event_file *file = hist_data->event_file;
3648 	struct snapshot_context context;
3649 
3650 	context.elt = elt;
3651 	context.key = key;
3652 
3653 	tracing_snapshot_cond(file->tr, &context);
3654 }
3655 
3656 static void hist_trigger_print_key(struct seq_file *m,
3657 				   struct hist_trigger_data *hist_data,
3658 				   void *key,
3659 				   struct tracing_map_elt *elt);
3660 
3661 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3662 {
3663 	unsigned int i;
3664 
3665 	if (!hist_data->n_actions)
3666 		return NULL;
3667 
3668 	for (i = 0; i < hist_data->n_actions; i++) {
3669 		struct action_data *data = hist_data->actions[i];
3670 
3671 		if (data->action == ACTION_SNAPSHOT)
3672 			return data;
3673 	}
3674 
3675 	return NULL;
3676 }
3677 
3678 static void track_data_snapshot_print(struct seq_file *m,
3679 				      struct hist_trigger_data *hist_data)
3680 {
3681 	struct trace_event_file *file = hist_data->event_file;
3682 	struct track_data *track_data;
3683 	struct action_data *action;
3684 
3685 	track_data = tracing_cond_snapshot_data(file->tr);
3686 	if (!track_data)
3687 		return;
3688 
3689 	if (!track_data->updated)
3690 		return;
3691 
3692 	action = snapshot_action(hist_data);
3693 	if (!action)
3694 		return;
3695 
3696 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
3697 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3698 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3699 		   action->track_data.var_str, track_data->track_val);
3700 
3701 	seq_puts(m, "\ttriggered by event with key: ");
3702 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3703 	seq_putc(m, '\n');
3704 }
3705 #else
3706 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3707 {
3708 	return false;
3709 }
3710 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3711 				     struct tracing_map_elt *elt, void *rec,
3712 				     struct ring_buffer_event *rbe, void *key,
3713 				     struct action_data *data,
3714 				     u64 *var_ref_vals) {}
3715 static void track_data_snapshot_print(struct seq_file *m,
3716 				      struct hist_trigger_data *hist_data) {}
3717 #endif /* CONFIG_TRACER_SNAPSHOT */
3718 
3719 static void track_data_print(struct seq_file *m,
3720 			     struct hist_trigger_data *hist_data,
3721 			     struct tracing_map_elt *elt,
3722 			     struct action_data *data)
3723 {
3724 	u64 track_val = get_track_val(hist_data, elt, data);
3725 	unsigned int i, save_var_idx;
3726 
3727 	if (data->handler == HANDLER_ONMAX)
3728 		seq_printf(m, "\n\tmax: %10llu", track_val);
3729 	else if (data->handler == HANDLER_ONCHANGE)
3730 		seq_printf(m, "\n\tchanged: %10llu", track_val);
3731 
3732 	if (data->action == ACTION_SNAPSHOT)
3733 		return;
3734 
3735 	for (i = 0; i < hist_data->n_save_vars; i++) {
3736 		struct hist_field *save_val = hist_data->save_vars[i]->val;
3737 		struct hist_field *save_var = hist_data->save_vars[i]->var;
3738 		u64 val;
3739 
3740 		save_var_idx = save_var->var.idx;
3741 
3742 		val = tracing_map_read_var(elt, save_var_idx);
3743 
3744 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3745 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3746 				   (char *)(uintptr_t)(val));
3747 		} else
3748 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3749 	}
3750 }
3751 
3752 static void ontrack_action(struct hist_trigger_data *hist_data,
3753 			   struct tracing_map_elt *elt, void *rec,
3754 			   struct ring_buffer_event *rbe, void *key,
3755 			   struct action_data *data, u64 *var_ref_vals)
3756 {
3757 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3758 
3759 	if (check_track_val(elt, data, var_val)) {
3760 		save_track_val(hist_data, elt, data, var_val);
3761 		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3762 	}
3763 }
3764 
3765 static void action_data_destroy(struct action_data *data)
3766 {
3767 	unsigned int i;
3768 
3769 	lockdep_assert_held(&event_mutex);
3770 
3771 	kfree(data->action_name);
3772 
3773 	for (i = 0; i < data->n_params; i++)
3774 		kfree(data->params[i]);
3775 
3776 	if (data->synth_event)
3777 		data->synth_event->ref--;
3778 
3779 	kfree(data->synth_event_name);
3780 
3781 	kfree(data);
3782 }
3783 
3784 static void track_data_destroy(struct hist_trigger_data *hist_data,
3785 			       struct action_data *data)
3786 {
3787 	struct trace_event_file *file = hist_data->event_file;
3788 
3789 	destroy_hist_field(data->track_data.track_var, 0);
3790 
3791 	if (data->action == ACTION_SNAPSHOT) {
3792 		struct track_data *track_data;
3793 
3794 		track_data = tracing_cond_snapshot_data(file->tr);
3795 		if (track_data && track_data->hist_data == hist_data) {
3796 			tracing_snapshot_cond_disable(file->tr);
3797 			track_data_free(track_data);
3798 		}
3799 	}
3800 
3801 	kfree(data->track_data.var_str);
3802 
3803 	action_data_destroy(data);
3804 }
3805 
3806 static int action_create(struct hist_trigger_data *hist_data,
3807 			 struct action_data *data);
3808 
3809 static int track_data_create(struct hist_trigger_data *hist_data,
3810 			     struct action_data *data)
3811 {
3812 	struct hist_field *var_field, *ref_field, *track_var = NULL;
3813 	struct trace_event_file *file = hist_data->event_file;
3814 	struct trace_array *tr = file->tr;
3815 	char *track_data_var_str;
3816 	int ret = 0;
3817 
3818 	track_data_var_str = data->track_data.var_str;
3819 	if (track_data_var_str[0] != '$') {
3820 		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3821 		return -EINVAL;
3822 	}
3823 	track_data_var_str++;
3824 
3825 	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3826 	if (!var_field) {
3827 		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3828 		return -EINVAL;
3829 	}
3830 
3831 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3832 	if (!ref_field)
3833 		return -ENOMEM;
3834 
3835 	data->track_data.var_ref = ref_field;
3836 
3837 	if (data->handler == HANDLER_ONMAX)
3838 		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3839 	if (IS_ERR(track_var)) {
3840 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3841 		ret = PTR_ERR(track_var);
3842 		goto out;
3843 	}
3844 
3845 	if (data->handler == HANDLER_ONCHANGE)
3846 		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3847 	if (IS_ERR(track_var)) {
3848 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3849 		ret = PTR_ERR(track_var);
3850 		goto out;
3851 	}
3852 	data->track_data.track_var = track_var;
3853 
3854 	ret = action_create(hist_data, data);
3855  out:
3856 	return ret;
3857 }
3858 
3859 static int parse_action_params(struct trace_array *tr, char *params,
3860 			       struct action_data *data)
3861 {
3862 	char *param, *saved_param;
3863 	bool first_param = true;
3864 	int ret = 0;
3865 
3866 	while (params) {
3867 		if (data->n_params >= SYNTH_FIELDS_MAX) {
3868 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
3869 			goto out;
3870 		}
3871 
3872 		param = strsep(&params, ",");
3873 		if (!param) {
3874 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3875 			ret = -EINVAL;
3876 			goto out;
3877 		}
3878 
3879 		param = strstrip(param);
3880 		if (strlen(param) < 2) {
3881 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3882 			ret = -EINVAL;
3883 			goto out;
3884 		}
3885 
3886 		saved_param = kstrdup(param, GFP_KERNEL);
3887 		if (!saved_param) {
3888 			ret = -ENOMEM;
3889 			goto out;
3890 		}
3891 
3892 		if (first_param && data->use_trace_keyword) {
3893 			data->synth_event_name = saved_param;
3894 			first_param = false;
3895 			continue;
3896 		}
3897 		first_param = false;
3898 
3899 		data->params[data->n_params++] = saved_param;
3900 	}
3901  out:
3902 	return ret;
3903 }
3904 
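/*
 * action_parse() parses the ".<action>(<params>)" part following an
 * onmax()/onchange()/onmatch() handler.  The recognized forms, shown
 * here with illustrative parameters, are:
 *
 *   .save(next_comm,prev_pid)                 (onmax/onchange only)
 *   .snapshot()                               (onmax/onchange only)
 *   .trace(wakeup_latency,$wakeup_lat,pid)
 *   .wakeup_latency($wakeup_lat,pid)
 *
 * The last two generate a synthetic event; with the "trace" keyword
 * the first param names the synthetic event, otherwise the action
 * name itself is taken as the synthetic event name.
 */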
3905 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3906 			enum handler_id handler)
3907 {
3908 	char *action_name;
3909 	int ret = 0;
3910 
3911 	strsep(&str, ".");
3912 	if (!str) {
3913 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3914 		ret = -EINVAL;
3915 		goto out;
3916 	}
3917 
3918 	action_name = strsep(&str, "(");
3919 	if (!action_name || !str) {
3920 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3921 		ret = -EINVAL;
3922 		goto out;
3923 	}
3924 
3925 	if (str_has_prefix(action_name, "save")) {
3926 		char *params = strsep(&str, ")");
3927 
3928 		if (!params) {
3929 			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3930 			ret = -EINVAL;
3931 			goto out;
3932 		}
3933 
3934 		ret = parse_action_params(tr, params, data);
3935 		if (ret)
3936 			goto out;
3937 
3938 		if (handler == HANDLER_ONMAX)
3939 			data->track_data.check_val = check_track_val_max;
3940 		else if (handler == HANDLER_ONCHANGE)
3941 			data->track_data.check_val = check_track_val_changed;
3942 		else {
3943 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3944 			ret = -EINVAL;
3945 			goto out;
3946 		}
3947 
3948 		data->track_data.save_data = save_track_data_vars;
3949 		data->fn = ontrack_action;
3950 		data->action = ACTION_SAVE;
3951 	} else if (str_has_prefix(action_name, "snapshot")) {
3952 		char *params = strsep(&str, ")");
3953 
3954 		if (!str) {
3955 			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3956 			ret = -EINVAL;
3957 			goto out;
3958 		}
3959 
3960 		if (handler == HANDLER_ONMAX)
3961 			data->track_data.check_val = check_track_val_max;
3962 		else if (handler == HANDLER_ONCHANGE)
3963 			data->track_data.check_val = check_track_val_changed;
3964 		else {
3965 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3966 			ret = -EINVAL;
3967 			goto out;
3968 		}
3969 
3970 		data->track_data.save_data = save_track_data_snapshot;
3971 		data->fn = ontrack_action;
3972 		data->action = ACTION_SNAPSHOT;
3973 	} else {
3974 		char *params = strsep(&str, ")");
3975 
3976 		if (str_has_prefix(action_name, "trace"))
3977 			data->use_trace_keyword = true;
3978 
3979 		if (params) {
3980 			ret = parse_action_params(tr, params, data);
3981 			if (ret)
3982 				goto out;
3983 		}
3984 
3985 		if (handler == HANDLER_ONMAX)
3986 			data->track_data.check_val = check_track_val_max;
3987 		else if (handler == HANDLER_ONCHANGE)
3988 			data->track_data.check_val = check_track_val_changed;
3989 
3990 		if (handler != HANDLER_ONMATCH) {
3991 			data->track_data.save_data = action_trace;
3992 			data->fn = ontrack_action;
3993 		} else
3994 			data->fn = action_trace;
3995 
3996 		data->action = ACTION_TRACE;
3997 	}
3998 
3999 	data->action_name = kstrdup(action_name, GFP_KERNEL);
4000 	if (!data->action_name) {
4001 		ret = -ENOMEM;
4002 		goto out;
4003 	}
4004 
4005 	data->handler = handler;
4006  out:
4007 	return ret;
4008 }
4009 
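/*
 * Parse "var).action(params)" for onmax() and onchange() handlers into
 * a newly allocated action_data.  The tracked variable name is only
 * saved as a string here; it's resolved later in track_data_create().
 */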
4010 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4011 					    char *str, enum handler_id handler)
4012 {
4013 	struct action_data *data;
4014 	int ret = -EINVAL;
4015 	char *var_str;
4016 
4017 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4018 	if (!data)
4019 		return ERR_PTR(-ENOMEM);
4020 
4021 	var_str = strsep(&str, ")");
4022 	if (!var_str || !str) {
4023 		ret = -EINVAL;
4024 		goto free;
4025 	}
4026 
4027 	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4028 	if (!data->track_data.var_str) {
4029 		ret = -ENOMEM;
4030 		goto free;
4031 	}
4032 
4033 	ret = action_parse(hist_data->event_file->tr, str, data, handler);
4034 	if (ret)
4035 		goto free;
4036  out:
4037 	return data;
4038  free:
4039 	track_data_destroy(hist_data, data);
4040 	data = ERR_PTR(ret);
4041 	goto out;
4042 }
4043 
4044 static void onmatch_destroy(struct action_data *data)
4045 {
4046 	kfree(data->match_data.event);
4047 	kfree(data->match_data.event_system);
4048 
4049 	action_data_destroy(data);
4050 }
4051 
4052 static void destroy_field_var(struct field_var *field_var)
4053 {
4054 	if (!field_var)
4055 		return;
4056 
4057 	destroy_hist_field(field_var->var, 0);
4058 	destroy_hist_field(field_var->val, 0);
4059 
4060 	kfree(field_var);
4061 }
4062 
4063 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4064 {
4065 	unsigned int i;
4066 
4067 	for (i = 0; i < hist_data->n_field_vars; i++)
4068 		destroy_field_var(hist_data->field_vars[i]);
4069 }
4070 
4071 static void save_field_var(struct hist_trigger_data *hist_data,
4072 			   struct field_var *field_var)
4073 {
4074 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4075 
4076 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
4077 		hist_data->n_field_var_str++;
4078 }
4079 
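/*
 * Verify that the hist field supplying the next synthetic event
 * parameter has the same type string as the synthetic event field
 * at position field_pos.
 */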
4081 static int check_synth_field(struct synth_event *event,
4082 			     struct hist_field *hist_field,
4083 			     unsigned int field_pos)
4084 {
4085 	struct synth_field *field;
4086 
4087 	if (field_pos >= event->n_fields)
4088 		return -EINVAL;
4089 
4090 	field = event->fields[field_pos];
4091 
4092 	if (strcmp(field->type, hist_field->type) != 0)
4093 		return -EINVAL;
4094 
4095 	return 0;
4096 }
4097 
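/*
 * Resolve a '$var' action parameter to an existing variable, first on
 * the target event and then, for onmatch(), on the matched event.
 */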
4098 static struct hist_field *
4099 trace_action_find_var(struct hist_trigger_data *hist_data,
4100 		      struct action_data *data,
4101 		      char *system, char *event, char *var)
4102 {
4103 	struct trace_array *tr = hist_data->event_file->tr;
4104 	struct hist_field *hist_field;
4105 
4106 	var++; /* skip '$' */
4107 
4108 	hist_field = find_target_event_var(hist_data, system, event, var);
4109 	if (!hist_field) {
4110 		if (!system && data->handler == HANDLER_ONMATCH) {
4111 			system = data->match_data.event_system;
4112 			event = data->match_data.event;
4113 		}
4114 
4115 		hist_field = find_event_var(hist_data, system, event, var);
4116 	}
4117 
4118 	if (!hist_field)
4119 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4120 
4121 	return hist_field;
4122 }
4123 
4124 static struct hist_field *
4125 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4126 			      struct action_data *data, char *system,
4127 			      char *event, char *var)
4128 {
4129 	struct hist_field *hist_field = NULL;
4130 	struct field_var *field_var;
4131 
4132 	/*
4133 	 * First try to create a field var on the target event (the event
4134 	 * currently being defined).  This will create a variable for
4135 	 * unqualified fields on the target event, or if qualified,
4136 	 * target fields that have qualified names matching the target.
4137 	 */
4138 	field_var = create_target_field_var(hist_data, system, event, var);
4139 
4140 	if (field_var && !IS_ERR(field_var)) {
4141 		save_field_var(hist_data, field_var);
4142 		hist_field = field_var->var;
4143 	} else {
4144 		field_var = NULL;
4145 		/*
4146 		 * If no explicit system.event is specified, default to
4147 		 * looking for fields on the onmatch(system.event.xxx)
4148 		 * event.
4149 		 */
4150 		if (!system && data->handler == HANDLER_ONMATCH) {
4151 			system = data->match_data.event_system;
4152 			event = data->match_data.event;
4153 		}
4154 
4155 		/*
4156 		 * At this point, we're looking at a field on another
4157 		 * event.  Because we can't modify a hist trigger on
4158 		 * another event to add a variable for a field, we need
4159 		 * to create a new trigger on that event and create the
4160 		 * variable at the same time.
4161 		 */
4162 		hist_field = create_field_var_hist(hist_data, system, event, var);
4163 		if (IS_ERR(hist_field))
4164 			goto free;
4165 	}
4166  out:
4167 	return hist_field;
4168  free:
4169 	destroy_field_var(field_var);
4170 	hist_field = NULL;
4171 	goto out;
4172 }
4173 
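/*
 * Create the variable references that will supply the synthetic
 * event's fields.  Each action parameter is either an existing
 * variable ('$var') or a field that gets an implicitly created field
 * variable; the resulting references must match the synthetic event's
 * field types and count.
 */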
4174 static int trace_action_create(struct hist_trigger_data *hist_data,
4175 			       struct action_data *data)
4176 {
4177 	struct trace_array *tr = hist_data->event_file->tr;
4178 	char *event_name, *param, *system = NULL;
4179 	struct hist_field *hist_field, *var_ref;
4180 	unsigned int i, var_ref_idx;
4181 	unsigned int field_pos = 0;
4182 	struct synth_event *event;
4183 	char *synth_event_name;
4184 	int ret = 0;
4185 
4186 	lockdep_assert_held(&event_mutex);
4187 
4188 	if (data->use_trace_keyword)
4189 		synth_event_name = data->synth_event_name;
4190 	else
4191 		synth_event_name = data->action_name;
4192 
4193 	event = find_synth_event(synth_event_name);
4194 	if (!event) {
4195 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4196 		return -EINVAL;
4197 	}
4198 
4199 	event->ref++;
4200 
4201 	var_ref_idx = hist_data->n_var_refs;
4202 
4203 	for (i = 0; i < data->n_params; i++) {
4204 		char *p;
4205 
4206 		p = param = kstrdup(data->params[i], GFP_KERNEL);
4207 		if (!param) {
4208 			ret = -ENOMEM;
4209 			goto err;
4210 		}
4211 
4212 		system = strsep(&param, ".");
4213 		if (!param) {
4214 			param = (char *)system;
4215 			system = event_name = NULL;
4216 		} else {
4217 			event_name = strsep(&param, ".");
4218 			if (!param) {
4219 				kfree(p);
4220 				ret = -EINVAL;
4221 				goto err;
4222 			}
4223 		}
4224 
4225 		if (param[0] == '$')
4226 			hist_field = trace_action_find_var(hist_data, data,
4227 							   system, event_name,
4228 							   param);
4229 		else
4230 			hist_field = trace_action_create_field_var(hist_data,
4231 								   data,
4232 								   system,
4233 								   event_name,
4234 								   param);
4235 
4236 		if (!hist_field) {
4237 			kfree(p);
4238 			ret = -EINVAL;
4239 			goto err;
4240 		}
4241 
4242 		if (check_synth_field(event, hist_field, field_pos) == 0) {
4243 			var_ref = create_var_ref(hist_data, hist_field,
4244 						 system, event_name);
4245 			if (!var_ref) {
4246 				kfree(p);
4247 				ret = -ENOMEM;
4248 				goto err;
4249 			}
4250 
4251 			field_pos++;
4252 			kfree(p);
4253 			continue;
4254 		}
4255 
4256 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4257 		kfree(p);
4258 		ret = -EINVAL;
4259 		goto err;
4260 	}
4261 
4262 	if (field_pos != event->n_fields) {
4263 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4264 		ret = -EINVAL;
4265 		goto err;
4266 	}
4267 
4268 	data->synth_event = event;
4269 	data->var_ref_idx = var_ref_idx;
4270  out:
4271 	return ret;
4272  err:
4273 	event->ref--;
4274 
4275 	goto out;
4276 }
4277 
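/*
 * Perform the per-action setup that can only be done once parsing has
 * finished: create synthetic event references for trace actions,
 * register a conditional snapshot for snapshot actions, and create
 * the field variables listed by save() actions.
 */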
4278 static int action_create(struct hist_trigger_data *hist_data,
4279 			 struct action_data *data)
4280 {
4281 	struct trace_event_file *file = hist_data->event_file;
4282 	struct trace_array *tr = file->tr;
4283 	struct track_data *track_data;
4284 	struct field_var *field_var;
4285 	unsigned int i;
4286 	char *param;
4287 	int ret = 0;
4288 
4289 	if (data->action == ACTION_TRACE)
4290 		return trace_action_create(hist_data, data);
4291 
4292 	if (data->action == ACTION_SNAPSHOT) {
4293 		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4294 		if (IS_ERR(track_data)) {
4295 			ret = PTR_ERR(track_data);
4296 			goto out;
4297 		}
4298 
4299 		ret = tracing_snapshot_cond_enable(file->tr, track_data,
4300 						   cond_snapshot_update);
4301 		if (ret)
4302 			track_data_free(track_data);
4303 
4304 		goto out;
4305 	}
4306 
4307 	if (data->action == ACTION_SAVE) {
4308 		if (hist_data->n_save_vars) {
4309 			ret = -EEXIST;
4310 			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4311 			goto out;
4312 		}
4313 
4314 		for (i = 0; i < data->n_params; i++) {
4315 			param = kstrdup(data->params[i], GFP_KERNEL);
4316 			if (!param) {
4317 				ret = -ENOMEM;
4318 				goto out;
4319 			}
4320 
4321 			field_var = create_target_field_var(hist_data, NULL, NULL, param);
4322 			if (IS_ERR(field_var)) {
4323 				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4324 					 errpos(param));
4325 				ret = PTR_ERR(field_var);
4326 				kfree(param);
4327 				goto out;
4328 			}
4329 
4330 			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4331 			if (field_var->val->flags & HIST_FIELD_FL_STRING)
4332 				hist_data->n_save_var_str++;
4333 			kfree(param);
4334 		}
4335 	}
4336  out:
4337 	return ret;
4338 }
4339 
4340 static int onmatch_create(struct hist_trigger_data *hist_data,
4341 			  struct action_data *data)
4342 {
4343 	return action_create(hist_data, data);
4344 }
4345 
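/*
 * Parse "system.event).action(params)" for an onmatch() handler,
 * verifying that the matched system.event actually exists.
 */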
4346 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4347 {
4348 	char *match_event, *match_event_system;
4349 	struct action_data *data;
4350 	int ret = -EINVAL;
4351 
4352 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4353 	if (!data)
4354 		return ERR_PTR(-ENOMEM);
4355 
4356 	match_event = strsep(&str, ")");
4357 	if (!match_event || !str) {
4358 		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4359 		goto free;
4360 	}
4361 
4362 	match_event_system = strsep(&match_event, ".");
4363 	if (!match_event) {
4364 		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4365 		goto free;
4366 	}
4367 
4368 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4369 		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4370 		goto free;
4371 	}
4372 
4373 	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4374 	if (!data->match_data.event) {
4375 		ret = -ENOMEM;
4376 		goto free;
4377 	}
4378 
4379 	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4380 	if (!data->match_data.event_system) {
4381 		ret = -ENOMEM;
4382 		goto free;
4383 	}
4384 
4385 	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4386 	if (ret)
4387 		goto free;
4388  out:
4389 	return data;
4390  free:
4391 	onmatch_destroy(data);
4392 	data = ERR_PTR(ret);
4393 	goto out;
4394 }
4395 
4396 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4397 {
4398 	hist_data->fields[HITCOUNT_IDX] =
4399 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4400 	if (!hist_data->fields[HITCOUNT_IDX])
4401 		return -ENOMEM;
4402 
4403 	hist_data->n_vals++;
4404 	hist_data->n_fields++;
4405 
4406 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4407 		return -EINVAL;
4408 
4409 	return 0;
4410 }
4411 
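/*
 * Parse a value or variable expression with parse_expr() and install
 * the resulting hist_field at val_idx, bumping the val/field counts.
 */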
4412 static int __create_val_field(struct hist_trigger_data *hist_data,
4413 			      unsigned int val_idx,
4414 			      struct trace_event_file *file,
4415 			      char *var_name, char *field_str,
4416 			      unsigned long flags)
4417 {
4418 	struct hist_field *hist_field;
4419 	int ret = 0;
4420 
4421 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4422 	if (IS_ERR(hist_field)) {
4423 		ret = PTR_ERR(hist_field);
4424 		goto out;
4425 	}
4426 
4427 	hist_data->fields[val_idx] = hist_field;
4428 
4429 	++hist_data->n_vals;
4430 	++hist_data->n_fields;
4431 
4432 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4433 		ret = -EINVAL;
4434  out:
4435 	return ret;
4436 }
4437 
4438 static int create_val_field(struct hist_trigger_data *hist_data,
4439 			    unsigned int val_idx,
4440 			    struct trace_event_file *file,
4441 			    char *field_str)
4442 {
4443 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4444 		return -EINVAL;
4445 
4446 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4447 }
4448 
4449 static int create_var_field(struct hist_trigger_data *hist_data,
4450 			    unsigned int val_idx,
4451 			    struct trace_event_file *file,
4452 			    char *var_name, char *expr_str)
4453 {
4454 	struct trace_array *tr = hist_data->event_file->tr;
4455 	unsigned long flags = 0;
4456 
4457 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4458 		return -EINVAL;
4459 
4460 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4461 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4462 		return -EINVAL;
4463 	}
4464 
4465 	flags |= HIST_FIELD_FL_VAR;
4466 	hist_data->n_vars++;
4467 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4468 		return -EINVAL;
4469 
4470 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4471 }
4472 
4473 static int create_val_fields(struct hist_trigger_data *hist_data,
4474 			     struct trace_event_file *file)
4475 {
4476 	char *fields_str, *field_str;
4477 	unsigned int i, j = 1;
4478 	int ret;
4479 
4480 	ret = create_hitcount_val(hist_data);
4481 	if (ret)
4482 		goto out;
4483 
4484 	fields_str = hist_data->attrs->vals_str;
4485 	if (!fields_str)
4486 		goto out;
4487 
4488 	strsep(&fields_str, "=");
4489 	if (!fields_str)
4490 		goto out;
4491 
4492 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4493 		     j < TRACING_MAP_VALS_MAX; i++) {
4494 		field_str = strsep(&fields_str, ",");
4495 		if (!field_str)
4496 			break;
4497 
4498 		if (strcmp(field_str, "hitcount") == 0)
4499 			continue;
4500 
4501 		ret = create_val_field(hist_data, j++, file, field_str);
4502 		if (ret)
4503 			goto out;
4504 	}
4505 
4506 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4507 		ret = -EINVAL;
4508  out:
4509 	return ret;
4510 }
4511 
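/*
 * Parse one key specification ("stacktrace" or an expression) into a
 * hist_field, align its size to u64 and account for it in the
 * compound key layout.  Returns the aligned key size on success.
 */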
4512 static int create_key_field(struct hist_trigger_data *hist_data,
4513 			    unsigned int key_idx,
4514 			    unsigned int key_offset,
4515 			    struct trace_event_file *file,
4516 			    char *field_str)
4517 {
4518 	struct trace_array *tr = hist_data->event_file->tr;
4519 	struct hist_field *hist_field = NULL;
4520 	unsigned long flags = 0;
4521 	unsigned int key_size;
4522 	int ret = 0;
4523 
4524 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4525 		return -EINVAL;
4526 
4527 	flags |= HIST_FIELD_FL_KEY;
4528 
4529 	if (strcmp(field_str, "stacktrace") == 0) {
4530 		flags |= HIST_FIELD_FL_STACKTRACE;
4531 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4532 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4533 	} else {
4534 		hist_field = parse_expr(hist_data, file, field_str, flags,
4535 					NULL, 0);
4536 		if (IS_ERR(hist_field)) {
4537 			ret = PTR_ERR(hist_field);
4538 			goto out;
4539 		}
4540 
4541 		if (field_has_hist_vars(hist_field, 0))	{
4542 			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4543 			destroy_hist_field(hist_field, 0);
4544 			ret = -EINVAL;
4545 			goto out;
4546 		}
4547 
4548 		key_size = hist_field->size;
4549 	}
4550 
4551 	hist_data->fields[key_idx] = hist_field;
4552 
4553 	key_size = ALIGN(key_size, sizeof(u64));
4554 	hist_data->fields[key_idx]->size = key_size;
4555 	hist_data->fields[key_idx]->offset = key_offset;
4556 
4557 	hist_data->key_size += key_size;
4558 
4559 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4560 		ret = -EINVAL;
4561 		goto out;
4562 	}
4563 
4564 	hist_data->n_keys++;
4565 	hist_data->n_fields++;
4566 
4567 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4568 		return -EINVAL;
4569 
4570 	ret = key_size;
4571  out:
4572 	return ret;
4573 }
4574 
4575 static int create_key_fields(struct hist_trigger_data *hist_data,
4576 			     struct trace_event_file *file)
4577 {
4578 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4579 	char *fields_str, *field_str;
4580 	int ret = -EINVAL;
4581 
4582 	fields_str = hist_data->attrs->keys_str;
4583 	if (!fields_str)
4584 		goto out;
4585 
4586 	strsep(&fields_str, "=");
4587 	if (!fields_str)
4588 		goto out;
4589 
4590 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4591 		field_str = strsep(&fields_str, ",");
4592 		if (!field_str)
4593 			break;
4594 		ret = create_key_field(hist_data, i, key_offset,
4595 				       file, field_str);
4596 		if (ret < 0)
4597 			goto out;
4598 		key_offset += ret;
4599 	}
4600 	if (fields_str) {
4601 		ret = -EINVAL;
4602 		goto out;
4603 	}
4604 	ret = 0;
4605  out:
4606 	return ret;
4607 }
4608 
4609 static int create_var_fields(struct hist_trigger_data *hist_data,
4610 			     struct trace_event_file *file)
4611 {
4612 	unsigned int i, j = hist_data->n_vals;
4613 	int ret = 0;
4614 
4615 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4616 
4617 	for (i = 0; i < n_vars; i++) {
4618 		char *var_name = hist_data->attrs->var_defs.name[i];
4619 		char *expr = hist_data->attrs->var_defs.expr[i];
4620 
4621 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4622 		if (ret)
4623 			goto out;
4624 	}
4625  out:
4626 	return ret;
4627 }
4628 
4629 static void free_var_defs(struct hist_trigger_data *hist_data)
4630 {
4631 	unsigned int i;
4632 
4633 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4634 		kfree(hist_data->attrs->var_defs.name[i]);
4635 		kfree(hist_data->attrs->var_defs.expr[i]);
4636 	}
4637 
4638 	hist_data->attrs->var_defs.n_vars = 0;
4639 }
4640 
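/*
 * Break each "name=expr[,name=expr...]" assignment string into
 * separate name/expression pairs, saved in attrs->var_defs for
 * create_var_fields() to consume.
 */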
4641 static int parse_var_defs(struct hist_trigger_data *hist_data)
4642 {
4643 	struct trace_array *tr = hist_data->event_file->tr;
4644 	char *s, *str, *var_name, *field_str;
4645 	unsigned int i, j, n_vars = 0;
4646 	int ret = 0;
4647 
4648 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4649 		str = hist_data->attrs->assignment_str[i];
4650 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4651 			field_str = strsep(&str, ",");
4652 			if (!field_str)
4653 				break;
4654 
4655 			var_name = strsep(&field_str, "=");
4656 			if (!var_name || !field_str) {
4657 				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4658 					 errpos(var_name));
4659 				ret = -EINVAL;
4660 				goto free;
4661 			}
4662 
4663 			if (n_vars == TRACING_MAP_VARS_MAX) {
4664 				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4665 				ret = -EINVAL;
4666 				goto free;
4667 			}
4668 
4669 			s = kstrdup(var_name, GFP_KERNEL);
4670 			if (!s) {
4671 				ret = -ENOMEM;
4672 				goto free;
4673 			}
4674 			hist_data->attrs->var_defs.name[n_vars] = s;
4675 
4676 			s = kstrdup(field_str, GFP_KERNEL);
4677 			if (!s) {
4678 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4679 				ret = -ENOMEM;
4680 				goto free;
4681 			}
4682 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4683 
4684 			hist_data->attrs->var_defs.n_vars = n_vars;
4685 		}
4686 	}
4687 
4688 	return ret;
4689  free:
4690 	free_var_defs(hist_data);
4691 
4692 	return ret;
4693 }
4694 
4695 static int create_hist_fields(struct hist_trigger_data *hist_data,
4696 			      struct trace_event_file *file)
4697 {
4698 	int ret;
4699 
4700 	ret = parse_var_defs(hist_data);
4701 	if (ret)
4702 		goto out;
4703 
4704 	ret = create_val_fields(hist_data, file);
4705 	if (ret)
4706 		goto out;
4707 
4708 	ret = create_var_fields(hist_data, file);
4709 	if (ret)
4710 		goto out;
4711 
4712 	ret = create_key_fields(hist_data, file);
4713 	if (ret)
4714 		goto out;
4715  out:
4716 	free_var_defs(hist_data);
4717 
4718 	return ret;
4719 }
4720 
4721 static int is_descending(const char *str)
4722 {
4723 	if (!str)
4724 		return 0;
4725 
4726 	if (strcmp(str, "descending") == 0)
4727 		return 1;
4728 
4729 	if (strcmp(str, "ascending") == 0)
4730 		return 0;
4731 
4732 	return -EINVAL;
4733 }
4734 
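/*
 * Translate the "sort=field[.descending],..." attribute into
 * tracing_map sort keys.  "hitcount" is always a valid sort field;
 * other fields are matched by name against the non-variable value
 * fields.
 */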
4735 static int create_sort_keys(struct hist_trigger_data *hist_data)
4736 {
4737 	char *fields_str = hist_data->attrs->sort_key_str;
4738 	struct tracing_map_sort_key *sort_key;
4739 	int descending, ret = 0;
4740 	unsigned int i, j, k;
4741 
4742 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4743 
4744 	if (!fields_str)
4745 		goto out;
4746 
4747 	strsep(&fields_str, "=");
4748 	if (!fields_str) {
4749 		ret = -EINVAL;
4750 		goto out;
4751 	}
4752 
4753 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4754 		struct hist_field *hist_field;
4755 		char *field_str, *field_name;
4756 		const char *test_name;
4757 
4758 		sort_key = &hist_data->sort_keys[i];
4759 
4760 		field_str = strsep(&fields_str, ",");
4761 		if (!field_str) {
4762 			if (i == 0)
4763 				ret = -EINVAL;
4764 			break;
4765 		}
4766 
4767 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4768 			ret = -EINVAL;
4769 			break;
4770 		}
4771 
4772 		field_name = strsep(&field_str, ".");
4773 		if (!field_name) {
4774 			ret = -EINVAL;
4775 			break;
4776 		}
4777 
4778 		if (strcmp(field_name, "hitcount") == 0) {
4779 			descending = is_descending(field_str);
4780 			if (descending < 0) {
4781 				ret = descending;
4782 				break;
4783 			}
4784 			sort_key->descending = descending;
4785 			continue;
4786 		}
4787 
4788 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4789 			unsigned int idx;
4790 
4791 			hist_field = hist_data->fields[j];
4792 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4793 				continue;
4794 
4795 			idx = k++;
4796 
4797 			test_name = hist_field_name(hist_field, 0);
4798 
4799 			if (strcmp(field_name, test_name) == 0) {
4800 				sort_key->field_idx = idx;
4801 				descending = is_descending(field_str);
4802 				if (descending < 0) {
4803 					ret = descending;
4804 					goto out;
4805 				}
4806 				sort_key->descending = descending;
4807 				break;
4808 			}
4809 		}
4810 		if (j == hist_data->n_fields) {
4811 			ret = -EINVAL;
4812 			break;
4813 		}
4814 	}
4815 
4816 	hist_data->n_sort_keys = i;
4817  out:
4818 	return ret;
4819 }
4820 
4821 static void destroy_actions(struct hist_trigger_data *hist_data)
4822 {
4823 	unsigned int i;
4824 
4825 	for (i = 0; i < hist_data->n_actions; i++) {
4826 		struct action_data *data = hist_data->actions[i];
4827 
4828 		if (data->handler == HANDLER_ONMATCH)
4829 			onmatch_destroy(data);
4830 		else if (data->handler == HANDLER_ONMAX ||
4831 			 data->handler == HANDLER_ONCHANGE)
4832 			track_data_destroy(hist_data, data);
4833 		else
4834 			kfree(data);
4835 	}
4836 }
4837 
4838 static int parse_actions(struct hist_trigger_data *hist_data)
4839 {
4840 	struct trace_array *tr = hist_data->event_file->tr;
4841 	struct action_data *data;
4842 	unsigned int i;
4843 	int ret = 0;
4844 	char *str;
4845 	int len;
4846 
4847 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4848 		str = hist_data->attrs->action_str[i];
4849 
4850 		if ((len = str_has_prefix(str, "onmatch("))) {
4851 			char *action_str = str + len;
4852 
4853 			data = onmatch_parse(tr, action_str);
4854 			if (IS_ERR(data)) {
4855 				ret = PTR_ERR(data);
4856 				break;
4857 			}
4858 		} else if ((len = str_has_prefix(str, "onmax("))) {
4859 			char *action_str = str + len;
4860 
4861 			data = track_data_parse(hist_data, action_str,
4862 						HANDLER_ONMAX);
4863 			if (IS_ERR(data)) {
4864 				ret = PTR_ERR(data);
4865 				break;
4866 			}
4867 		} else if ((len = str_has_prefix(str, "onchange("))) {
4868 			char *action_str = str + len;
4869 
4870 			data = track_data_parse(hist_data, action_str,
4871 						HANDLER_ONCHANGE);
4872 			if (IS_ERR(data)) {
4873 				ret = PTR_ERR(data);
4874 				break;
4875 			}
4876 		} else {
4877 			ret = -EINVAL;
4878 			break;
4879 		}
4880 
4881 		hist_data->actions[hist_data->n_actions++] = data;
4882 	}
4883 
4884 	return ret;
4885 }
4886 
4887 static int create_actions(struct hist_trigger_data *hist_data)
4888 {
4889 	struct action_data *data;
4890 	unsigned int i;
4891 	int ret = 0;
4892 
4893 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4894 		data = hist_data->actions[i];
4895 
4896 		if (data->handler == HANDLER_ONMATCH) {
4897 			ret = onmatch_create(hist_data, data);
4898 			if (ret)
4899 				break;
4900 		} else if (data->handler == HANDLER_ONMAX ||
4901 			   data->handler == HANDLER_ONCHANGE) {
4902 			ret = track_data_create(hist_data, data);
4903 			if (ret)
4904 				break;
4905 		} else {
4906 			ret = -EINVAL;
4907 			break;
4908 		}
4909 	}
4910 
4911 	return ret;
4912 }
4913 
4914 static void print_actions(struct seq_file *m,
4915 			  struct hist_trigger_data *hist_data,
4916 			  struct tracing_map_elt *elt)
4917 {
4918 	unsigned int i;
4919 
4920 	for (i = 0; i < hist_data->n_actions; i++) {
4921 		struct action_data *data = hist_data->actions[i];
4922 
4923 		if (data->action == ACTION_SNAPSHOT)
4924 			continue;
4925 
4926 		if (data->handler == HANDLER_ONMAX ||
4927 		    data->handler == HANDLER_ONCHANGE)
4928 			track_data_print(m, hist_data, elt, data);
4929 	}
4930 }
4931 
4932 static void print_action_spec(struct seq_file *m,
4933 			      struct hist_trigger_data *hist_data,
4934 			      struct action_data *data)
4935 {
4936 	unsigned int i;
4937 
4938 	if (data->action == ACTION_SAVE) {
4939 		for (i = 0; i < hist_data->n_save_vars; i++) {
4940 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4941 			if (i < hist_data->n_save_vars - 1)
4942 				seq_puts(m, ",");
4943 		}
4944 	} else if (data->action == ACTION_TRACE) {
4945 		if (data->use_trace_keyword)
4946 			seq_printf(m, "%s", data->synth_event_name);
4947 		for (i = 0; i < data->n_params; i++) {
4948 			if (i || data->use_trace_keyword)
4949 				seq_puts(m, ",");
4950 			seq_printf(m, "%s", data->params[i]);
4951 		}
4952 	}
4953 }
4954 
4955 static void print_track_data_spec(struct seq_file *m,
4956 				  struct hist_trigger_data *hist_data,
4957 				  struct action_data *data)
4958 {
4959 	if (data->handler == HANDLER_ONMAX)
4960 		seq_puts(m, ":onmax(");
4961 	else if (data->handler == HANDLER_ONCHANGE)
4962 		seq_puts(m, ":onchange(");
4963 	seq_printf(m, "%s", data->track_data.var_str);
4964 	seq_printf(m, ").%s(", data->action_name);
4965 
4966 	print_action_spec(m, hist_data, data);
4967 
4968 	seq_puts(m, ")");
4969 }
4970 
4971 static void print_onmatch_spec(struct seq_file *m,
4972 			       struct hist_trigger_data *hist_data,
4973 			       struct action_data *data)
4974 {
4975 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4976 		   data->match_data.event);
4977 
4978 	seq_printf(m, "%s(", data->action_name);
4979 
4980 	print_action_spec(m, hist_data, data);
4981 
4982 	seq_puts(m, ")");
4983 }
4984 
4985 static bool actions_match(struct hist_trigger_data *hist_data,
4986 			  struct hist_trigger_data *hist_data_test)
4987 {
4988 	unsigned int i, j;
4989 
4990 	if (hist_data->n_actions != hist_data_test->n_actions)
4991 		return false;
4992 
4993 	for (i = 0; i < hist_data->n_actions; i++) {
4994 		struct action_data *data = hist_data->actions[i];
4995 		struct action_data *data_test = hist_data_test->actions[i];
4996 		char *action_name, *action_name_test;
4997 
4998 		if (data->handler != data_test->handler)
4999 			return false;
5000 		if (data->action != data_test->action)
5001 			return false;
5002 
5003 		if (data->n_params != data_test->n_params)
5004 			return false;
5005 
5006 		for (j = 0; j < data->n_params; j++) {
5007 			if (strcmp(data->params[j], data_test->params[j]) != 0)
5008 				return false;
5009 		}
5010 
5011 		if (data->use_trace_keyword)
5012 			action_name = data->synth_event_name;
5013 		else
5014 			action_name = data->action_name;
5015 
5016 		if (data_test->use_trace_keyword)
5017 			action_name_test = data_test->synth_event_name;
5018 		else
5019 			action_name_test = data_test->action_name;
5020 
5021 		if (strcmp(action_name, action_name_test) != 0)
5022 			return false;
5023 
5024 		if (data->handler == HANDLER_ONMATCH) {
5025 			if (strcmp(data->match_data.event_system,
5026 				   data_test->match_data.event_system) != 0)
5027 				return false;
5028 			if (strcmp(data->match_data.event,
5029 				   data_test->match_data.event) != 0)
5030 				return false;
5031 		} else if (data->handler == HANDLER_ONMAX ||
5032 			   data->handler == HANDLER_ONCHANGE) {
5033 			if (strcmp(data->track_data.var_str,
5034 				   data_test->track_data.var_str) != 0)
5035 				return false;
5036 		}
5037 	}
5038 
5039 	return true;
5040 }
5041 
5043 static void print_actions_spec(struct seq_file *m,
5044 			       struct hist_trigger_data *hist_data)
5045 {
5046 	unsigned int i;
5047 
5048 	for (i = 0; i < hist_data->n_actions; i++) {
5049 		struct action_data *data = hist_data->actions[i];
5050 
5051 		if (data->handler == HANDLER_ONMATCH)
5052 			print_onmatch_spec(m, hist_data, data);
5053 		else if (data->handler == HANDLER_ONMAX ||
5054 			 data->handler == HANDLER_ONCHANGE)
5055 			print_track_data_spec(m, hist_data, data);
5056 	}
5057 }
5058 
5059 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5060 {
5061 	unsigned int i;
5062 
5063 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5064 		kfree(hist_data->field_var_hists[i]->cmd);
5065 		kfree(hist_data->field_var_hists[i]);
5066 	}
5067 }
5068 
5069 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5070 {
5071 	if (!hist_data)
5072 		return;
5073 
5074 	destroy_hist_trigger_attrs(hist_data->attrs);
5075 	destroy_hist_fields(hist_data);
5076 	tracing_map_destroy(hist_data->map);
5077 
5078 	destroy_actions(hist_data);
5079 	destroy_field_vars(hist_data);
5080 	destroy_field_var_hists(hist_data);
5081 
5082 	kfree(hist_data);
5083 }
5084 
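/*
 * Mirror the hist fields into the tracing_map: keys get a compare
 * function appropriate to their type, plain values become sums, and
 * variables get dedicated map variable slots.
 */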
5085 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5086 {
5087 	struct tracing_map *map = hist_data->map;
5088 	struct ftrace_event_field *field;
5089 	struct hist_field *hist_field;
5090 	int i, idx = 0;
5091 
5092 	for_each_hist_field(i, hist_data) {
5093 		hist_field = hist_data->fields[i];
5094 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
5095 			tracing_map_cmp_fn_t cmp_fn;
5096 
5097 			field = hist_field->field;
5098 
5099 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5100 				cmp_fn = tracing_map_cmp_none;
5101 			else if (!field)
5102 				cmp_fn = tracing_map_cmp_num(hist_field->size,
5103 							     hist_field->is_signed);
5104 			else if (is_string_field(field))
5105 				cmp_fn = tracing_map_cmp_string;
5106 			else
5107 				cmp_fn = tracing_map_cmp_num(field->size,
5108 							     field->is_signed);
5109 			idx = tracing_map_add_key_field(map,
5110 							hist_field->offset,
5111 							cmp_fn);
5112 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5113 			idx = tracing_map_add_sum_field(map);
5114 
5115 		if (idx < 0)
5116 			return idx;
5117 
5118 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5119 			idx = tracing_map_add_var(map);
5120 			if (idx < 0)
5121 				return idx;
5122 			hist_field->var.idx = idx;
5123 			hist_field->var.hist_data = hist_data;
5124 		}
5125 	}
5126 
5127 	return 0;
5128 }
5129 
5130 static struct hist_trigger_data *
5131 create_hist_data(unsigned int map_bits,
5132 		 struct hist_trigger_attrs *attrs,
5133 		 struct trace_event_file *file,
5134 		 bool remove)
5135 {
5136 	const struct tracing_map_ops *map_ops = NULL;
5137 	struct hist_trigger_data *hist_data;
5138 	int ret = 0;
5139 
5140 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5141 	if (!hist_data)
5142 		return ERR_PTR(-ENOMEM);
5143 
5144 	hist_data->attrs = attrs;
5145 	hist_data->remove = remove;
5146 	hist_data->event_file = file;
5147 
5148 	ret = parse_actions(hist_data);
5149 	if (ret)
5150 		goto free;
5151 
5152 	ret = create_hist_fields(hist_data, file);
5153 	if (ret)
5154 		goto free;
5155 
5156 	ret = create_sort_keys(hist_data);
5157 	if (ret)
5158 		goto free;
5159 
5160 	map_ops = &hist_trigger_elt_data_ops;
5161 
5162 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5163 					    map_ops, hist_data);
5164 	if (IS_ERR(hist_data->map)) {
5165 		ret = PTR_ERR(hist_data->map);
5166 		hist_data->map = NULL;
5167 		goto free;
5168 	}
5169 
5170 	ret = create_tracing_map_fields(hist_data);
5171 	if (ret)
5172 		goto free;
5173  out:
5174 	return hist_data;
5175  free:
5176 	hist_data->attrs = NULL;
5177 
5178 	destroy_hist_data(hist_data);
5179 
5180 	hist_data = ERR_PTR(ret);
5181 
5182 	goto out;
5183 }
5184 
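/*
 * Update a map element for the current event: recompute each value
 * field, storing variables with tracing_map_set_var() and summing
 * everything else, then update any key-attached variables and field
 * variables.
 */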
5185 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5186 				    struct tracing_map_elt *elt, void *rec,
5187 				    struct ring_buffer_event *rbe,
5188 				    u64 *var_ref_vals)
5189 {
5190 	struct hist_elt_data *elt_data;
5191 	struct hist_field *hist_field;
5192 	unsigned int i, var_idx;
5193 	u64 hist_val;
5194 
5195 	elt_data = elt->private_data;
5196 	elt_data->var_ref_vals = var_ref_vals;
5197 
5198 	for_each_hist_val_field(i, hist_data) {
5199 		hist_field = hist_data->fields[i];
5200 		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5201 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5202 			var_idx = hist_field->var.idx;
5203 			tracing_map_set_var(elt, var_idx, hist_val);
5204 			continue;
5205 		}
5206 		tracing_map_update_sum(elt, i, hist_val);
5207 	}
5208 
5209 	for_each_hist_key_field(i, hist_data) {
5210 		hist_field = hist_data->fields[i];
5211 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5212 			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5213 			var_idx = hist_field->var.idx;
5214 			tracing_map_set_var(elt, var_idx, hist_val);
5215 		}
5216 	}
5217 
5218 	update_field_vars(hist_data, elt, rbe, rec);
5219 }
5220 
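/*
 * Copy one key field's data into the compound key buffer, clamping
 * string keys to the field size minus one so the key stays
 * NUL-terminated.
 */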
5221 static inline void add_to_key(char *compound_key, void *key,
5222 			      struct hist_field *key_field, void *rec)
5223 {
5224 	size_t size = key_field->size;
5225 
5226 	if (key_field->flags & HIST_FIELD_FL_STRING) {
5227 		struct ftrace_event_field *field;
5228 
5229 		field = key_field->field;
5230 		if (field->filter_type == FILTER_DYN_STRING)
5231 			size = *(u32 *)(rec + field->offset) >> 16;
5232 		else if (field->filter_type == FILTER_PTR_STRING)
5233 			size = strlen(key);
5234 		else if (field->filter_type == FILTER_STATIC_STRING)
5235 			size = field->size;
5236 
5237 		/* ensure NULL-termination */
5238 		if (size > key_field->size - 1)
5239 			size = key_field->size - 1;
5240 
5241 		strncpy(compound_key + key_field->offset, (char *)key, size);
5242 	} else
5243 		memcpy(compound_key + key_field->offset, key, size);
5244 }
5245 
5246 static void
5247 hist_trigger_actions(struct hist_trigger_data *hist_data,
5248 		     struct tracing_map_elt *elt, void *rec,
5249 		     struct ring_buffer_event *rbe, void *key,
5250 		     u64 *var_ref_vals)
5251 {
5252 	struct action_data *data;
5253 	unsigned int i;
5254 
5255 	for (i = 0; i < hist_data->n_actions; i++) {
5256 		data = hist_data->actions[i];
5257 		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5258 	}
5259 }
5260 
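/*
 * Per-event hit path: assemble the (possibly compound) key from the
 * key fields, resolve any variable references, insert or look up the
 * tracing_map element, update it, and finally run any actions whose
 * variable references resolved.
 */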
5261 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5262 			       struct ring_buffer_event *rbe)
5263 {
5264 	struct hist_trigger_data *hist_data = data->private_data;
5265 	bool use_compound_key = (hist_data->n_keys > 1);
5266 	unsigned long entries[HIST_STACKTRACE_DEPTH];
5267 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5268 	char compound_key[HIST_KEY_SIZE_MAX];
5269 	struct tracing_map_elt *elt = NULL;
5270 	struct hist_field *key_field;
5271 	u64 field_contents;
5272 	void *key = NULL;
5273 	unsigned int i;
5274 
5275 	memset(compound_key, 0, hist_data->key_size);
5276 
5277 	for_each_hist_key_field(i, hist_data) {
5278 		key_field = hist_data->fields[i];
5279 
5280 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5281 			memset(entries, 0, HIST_STACKTRACE_SIZE);
5282 			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5283 					 HIST_STACKTRACE_SKIP);
5284 			key = entries;
5285 		} else {
5286 			field_contents = key_field->fn(key_field, elt, rbe, rec);
5287 			if (key_field->flags & HIST_FIELD_FL_STRING) {
5288 				key = (void *)(unsigned long)field_contents;
5289 				use_compound_key = true;
5290 			} else
5291 				key = (void *)&field_contents;
5292 		}
5293 
5294 		if (use_compound_key)
5295 			add_to_key(compound_key, key, key_field, rec);
5296 	}
5297 
5298 	if (use_compound_key)
5299 		key = compound_key;
5300 
5301 	if (hist_data->n_var_refs &&
5302 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
5303 		return;
5304 
5305 	elt = tracing_map_insert(hist_data->map, key);
5306 	if (!elt)
5307 		return;
5308 
5309 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5310 
5311 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5312 		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5313 }
5314 
5315 static void hist_trigger_stacktrace_print(struct seq_file *m,
5316 					  unsigned long *stacktrace_entries,
5317 					  unsigned int max_entries)
5318 {
5319 	char str[KSYM_SYMBOL_LEN];
5320 	unsigned int spaces = 8;
5321 	unsigned int i;
5322 
5323 	for (i = 0; i < max_entries; i++) {
5324 		if (!stacktrace_entries[i])
5325 			return;
5326 
5327 		seq_printf(m, "%*c", 1 + spaces, ' ');
5328 		sprint_symbol(str, stacktrace_entries[i]);
5329 		seq_printf(m, "%s\n", str);
5330 	}
5331 }
5332 
5333 static void hist_trigger_print_key(struct seq_file *m,
5334 				   struct hist_trigger_data *hist_data,
5335 				   void *key,
5336 				   struct tracing_map_elt *elt)
5337 {
5338 	struct hist_field *key_field;
5339 	char str[KSYM_SYMBOL_LEN];
5340 	bool multiline = false;
5341 	const char *field_name;
5342 	unsigned int i;
5343 	u64 uval;
5344 
5345 	seq_puts(m, "{ ");
5346 
5347 	for_each_hist_key_field(i, hist_data) {
5348 		key_field = hist_data->fields[i];
5349 
5350 		if (i > hist_data->n_vals)
5351 			seq_puts(m, ", ");
5352 
5353 		field_name = hist_field_name(key_field, 0);
5354 
5355 		if (key_field->flags & HIST_FIELD_FL_HEX) {
5356 			uval = *(u64 *)(key + key_field->offset);
5357 			seq_printf(m, "%s: %llx", field_name, uval);
5358 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
5359 			uval = *(u64 *)(key + key_field->offset);
5360 			sprint_symbol_no_offset(str, uval);
5361 			seq_printf(m, "%s: [%llx] %-45s", field_name,
5362 				   uval, str);
5363 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5364 			uval = *(u64 *)(key + key_field->offset);
5365 			sprint_symbol(str, uval);
5366 			seq_printf(m, "%s: [%llx] %-55s", field_name,
5367 				   uval, str);
5368 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5369 			struct hist_elt_data *elt_data = elt->private_data;
5370 			char *comm;
5371 
5372 			if (WARN_ON_ONCE(!elt_data))
5373 				return;
5374 
5375 			comm = elt_data->comm;
5376 
5377 			uval = *(u64 *)(key + key_field->offset);
5378 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
5379 				   comm, uval);
5380 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5381 			const char *syscall_name;
5382 
5383 			uval = *(u64 *)(key + key_field->offset);
5384 			syscall_name = get_syscall_name(uval);
5385 			if (!syscall_name)
5386 				syscall_name = "unknown_syscall";
5387 
5388 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
5389 				   syscall_name, uval);
5390 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5391 			seq_puts(m, "stacktrace:\n");
5392 			hist_trigger_stacktrace_print(m,
5393 						      key + key_field->offset,
5394 						      HIST_STACKTRACE_DEPTH);
5395 			multiline = true;
5396 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5397 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5398 				   *(u64 *)(key + key_field->offset));
5399 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
5400 			seq_printf(m, "%s: %-50s", field_name,
5401 				   (char *)(key + key_field->offset));
5402 		} else {
5403 			uval = *(u64 *)(key + key_field->offset);
5404 			seq_printf(m, "%s: %10llu", field_name, uval);
5405 		}
5406 	}
5407 
5408 	if (!multiline)
5409 		seq_puts(m, " ");
5410 
5411 	seq_puts(m, "}");
5412 }
5413 
5414 static void hist_trigger_entry_print(struct seq_file *m,
5415 				     struct hist_trigger_data *hist_data,
5416 				     void *key,
5417 				     struct tracing_map_elt *elt)
5418 {
5419 	const char *field_name;
5420 	unsigned int i;
5421 
5422 	hist_trigger_print_key(m, hist_data, key, elt);
5423 
5424 	seq_printf(m, " hitcount: %10llu",
5425 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
5426 
5427 	for (i = 1; i < hist_data->n_vals; i++) {
5428 		field_name = hist_field_name(hist_data->fields[i], 0);
5429 
5430 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5431 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5432 			continue;
5433 
5434 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5435 			seq_printf(m, "  %s: %10llx", field_name,
5436 				   tracing_map_read_sum(elt, i));
5437 		} else {
5438 			seq_printf(m, "  %s: %10llu", field_name,
5439 				   tracing_map_read_sum(elt, i));
5440 		}
5441 	}
5442 
5443 	print_actions(m, hist_data, elt);
5444 
5445 	seq_puts(m, "\n");
5446 }
5447 
5448 static int print_entries(struct seq_file *m,
5449 			 struct hist_trigger_data *hist_data)
5450 {
5451 	struct tracing_map_sort_entry **sort_entries = NULL;
5452 	struct tracing_map *map = hist_data->map;
5453 	int i, n_entries;
5454 
5455 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5456 					     hist_data->n_sort_keys,
5457 					     &sort_entries);
5458 	if (n_entries < 0)
5459 		return n_entries;
5460 
5461 	for (i = 0; i < n_entries; i++)
5462 		hist_trigger_entry_print(m, hist_data,
5463 					 sort_entries[i]->key,
5464 					 sort_entries[i]->elt);
5465 
5466 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
5467 
5468 	return n_entries;
5469 }
5470 
5471 static void hist_trigger_show(struct seq_file *m,
5472 			      struct event_trigger_data *data, int n)
5473 {
5474 	struct hist_trigger_data *hist_data;
5475 	int n_entries;
5476 
5477 	if (n > 0)
5478 		seq_puts(m, "\n\n");
5479 
5480 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5481 	data->ops->print(m, data->ops, data);
5482 	seq_puts(m, "#\n\n");
5483 
5484 	hist_data = data->private_data;
5485 	n_entries = print_entries(m, hist_data);
5486 	if (n_entries < 0)
5487 		n_entries = 0;
5488 
5489 	track_data_snapshot_print(m, hist_data);
5490 
5491 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
5492 		   (u64)atomic64_read(&hist_data->map->hits),
5493 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
5494 }
5495 
5496 static int hist_show(struct seq_file *m, void *v)
5497 {
5498 	struct event_trigger_data *data;
5499 	struct trace_event_file *event_file;
5500 	int n = 0, ret = 0;
5501 
5502 	mutex_lock(&event_mutex);
5503 
5504 	event_file = event_file_data(m->private);
5505 	if (unlikely(!event_file)) {
5506 		ret = -ENODEV;
5507 		goto out_unlock;
5508 	}
5509 
5510 	list_for_each_entry_rcu(data, &event_file->triggers, list) {
5511 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5512 			hist_trigger_show(m, data, n++);
5513 	}
5514 
5515  out_unlock:
5516 	mutex_unlock(&event_mutex);
5517 
5518 	return ret;
5519 }
5520 
5521 static int event_hist_open(struct inode *inode, struct file *file)
5522 {
5523 	int ret;
5524 
5525 	ret = security_locked_down(LOCKDOWN_TRACEFS);
5526 	if (ret)
5527 		return ret;
5528 
5529 	return single_open(file, hist_show, file);
5530 }
5531 
5532 const struct file_operations event_hist_fops = {
5533 	.open = event_hist_open,
5534 	.read = seq_read,
5535 	.llseek = seq_lseek,
5536 	.release = single_release,
5537 };
5538 
5539 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5540 {
5541 	const char *field_name = hist_field_name(hist_field, 0);
5542 
5543 	if (hist_field->var.name)
5544 		seq_printf(m, "%s=", hist_field->var.name);
5545 
5546 	if (hist_field->flags & HIST_FIELD_FL_CPU)
5547 		seq_puts(m, "cpu");
5548 	else if (field_name) {
5549 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5550 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
5551 			seq_putc(m, '$');
5552 		seq_printf(m, "%s", field_name);
5553 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5554 		seq_puts(m, "common_timestamp");
5555 
5556 	if (hist_field->flags) {
5557 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5558 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5559 			const char *flags = get_hist_field_flags(hist_field);
5560 
5561 			if (flags)
5562 				seq_printf(m, ".%s", flags);
5563 		}
5564 	}
5565 }
5566 
5567 static int event_hist_trigger_print(struct seq_file *m,
5568 				    struct event_trigger_ops *ops,
5569 				    struct event_trigger_data *data)
5570 {
5571 	struct hist_trigger_data *hist_data = data->private_data;
5572 	struct hist_field *field;
5573 	bool have_var = false;
5574 	unsigned int i;
5575 
5576 	seq_puts(m, "hist:");
5577 
5578 	if (data->name)
5579 		seq_printf(m, "%s:", data->name);
5580 
5581 	seq_puts(m, "keys=");
5582 
5583 	for_each_hist_key_field(i, hist_data) {
5584 		field = hist_data->fields[i];
5585 
5586 		if (i > hist_data->n_vals)
5587 			seq_puts(m, ",");
5588 
5589 		if (field->flags & HIST_FIELD_FL_STACKTRACE)
5590 			seq_puts(m, "stacktrace");
5591 		else
5592 			hist_field_print(m, field);
5593 	}
5594 
5595 	seq_puts(m, ":vals=");
5596 
5597 	for_each_hist_val_field(i, hist_data) {
5598 		field = hist_data->fields[i];
5599 		if (field->flags & HIST_FIELD_FL_VAR) {
5600 			have_var = true;
5601 			continue;
5602 		}
5603 
5604 		if (i == HITCOUNT_IDX)
5605 			seq_puts(m, "hitcount");
5606 		else {
5607 			seq_puts(m, ",");
5608 			hist_field_print(m, field);
5609 		}
5610 	}
5611 
5612 	if (have_var) {
5613 		unsigned int n = 0;
5614 
5615 		seq_puts(m, ":");
5616 
5617 		for_each_hist_val_field(i, hist_data) {
5618 			field = hist_data->fields[i];
5619 
5620 			if (field->flags & HIST_FIELD_FL_VAR) {
5621 				if (n++)
5622 					seq_puts(m, ",");
5623 				hist_field_print(m, field);
5624 			}
5625 		}
5626 	}
5627 
5628 	seq_puts(m, ":sort=");
5629 
5630 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5631 		struct tracing_map_sort_key *sort_key;
5632 		unsigned int idx, first_key_idx;
5633 
5634 		/* skip VAR vals */
5635 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
5636 
5637 		sort_key = &hist_data->sort_keys[i];
5638 		idx = sort_key->field_idx;
5639 
5640 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
5641 			return -EINVAL;
5642 
5643 		if (i > 0)
5644 			seq_puts(m, ",");
5645 
5646 		if (idx == HITCOUNT_IDX)
5647 			seq_puts(m, "hitcount");
5648 		else {
5649 			if (idx >= first_key_idx)
5650 				idx += hist_data->n_vars;
5651 			hist_field_print(m, hist_data->fields[idx]);
5652 		}
5653 
5654 		if (sort_key->descending)
5655 			seq_puts(m, ".descending");
5656 	}
5657 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5658 	if (hist_data->enable_timestamps)
5659 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5660 
5661 	print_actions_spec(m, hist_data);
5662 
5663 	if (data->filter_str)
5664 		seq_printf(m, " if %s", data->filter_str);
5665 
5666 	if (data->paused)
5667 		seq_puts(m, " [paused]");
5668 	else
5669 		seq_puts(m, " [active]");
5670 
5671 	seq_putc(m, '\n');
5672 
5673 	return 0;
5674 }
5675 
5676 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5677 				   struct event_trigger_data *data)
5678 {
5679 	struct hist_trigger_data *hist_data = data->private_data;
5680 
5681 	if (!data->ref && hist_data->attrs->name)
5682 		save_named_trigger(hist_data->attrs->name, data);
5683 
5684 	data->ref++;
5685 
5686 	return 0;
5687 }
5688 
5689 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5690 {
5691 	struct trace_event_file *file;
5692 	unsigned int i;
5693 	char *cmd;
5694 	int ret;
5695 
5696 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5697 		file = hist_data->field_var_hists[i]->hist_data->event_file;
5698 		cmd = hist_data->field_var_hists[i]->cmd;
5699 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5700 					      "!hist", "hist", cmd);
5701 	}
5702 }
5703 
5704 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5705 				    struct event_trigger_data *data)
5706 {
5707 	struct hist_trigger_data *hist_data = data->private_data;
5708 
5709 	if (WARN_ON_ONCE(data->ref <= 0))
5710 		return;
5711 
5712 	data->ref--;
5713 	if (!data->ref) {
5714 		if (data->name)
5715 			del_named_trigger(data);
5716 
5717 		trigger_data_free(data);
5718 
5719 		remove_hist_vars(hist_data);
5720 
5721 		unregister_field_var_hists(hist_data);
5722 
5723 		destroy_hist_data(hist_data);
5724 	}
5725 }
5726 
5727 static struct event_trigger_ops event_hist_trigger_ops = {
5728 	.func			= event_hist_trigger,
5729 	.print			= event_hist_trigger_print,
5730 	.init			= event_hist_trigger_init,
5731 	.free			= event_hist_trigger_free,
5732 };
5733 
5734 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5735 					 struct event_trigger_data *data)
5736 {
5737 	data->ref++;
5738 
5739 	save_named_trigger(data->named_data->name, data);
5740 
5741 	event_hist_trigger_init(ops, data->named_data);
5742 
5743 	return 0;
5744 }
5745 
5746 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5747 					  struct event_trigger_data *data)
5748 {
5749 	if (WARN_ON_ONCE(data->ref <= 0))
5750 		return;
5751 
5752 	event_hist_trigger_free(ops, data->named_data);
5753 
5754 	data->ref--;
5755 	if (!data->ref) {
5756 		del_named_trigger(data);
5757 		trigger_data_free(data);
5758 	}
5759 }
5760 
5761 static struct event_trigger_ops event_hist_trigger_named_ops = {
5762 	.func			= event_hist_trigger,
5763 	.print			= event_hist_trigger_print,
5764 	.init			= event_hist_trigger_named_init,
5765 	.free			= event_hist_trigger_named_free,
5766 };
5767 
5768 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5769 							    char *param)
5770 {
5771 	return &event_hist_trigger_ops;
5772 }
5773 
5774 static void hist_clear(struct event_trigger_data *data)
5775 {
5776 	struct hist_trigger_data *hist_data = data->private_data;
5777 
5778 	if (data->name)
5779 		pause_named_trigger(data);
5780 
5781 	tracepoint_synchronize_unregister();
5782 
5783 	tracing_map_clear(hist_data->map);
5784 
5785 	if (data->name)
5786 		unpause_named_trigger(data);
5787 }
5788 
5789 static bool compatible_field(struct ftrace_event_field *field,
5790 			     struct ftrace_event_field *test_field)
5791 {
5792 	if (field == test_field)
5793 		return true;
5794 	if (field == NULL || test_field == NULL)
5795 		return false;
5796 	if (strcmp(field->name, test_field->name) != 0)
5797 		return false;
5798 	if (strcmp(field->type, test_field->type) != 0)
5799 		return false;
5800 	if (field->size != test_field->size)
5801 		return false;
5802 	if (field->is_signed != test_field->is_signed)
5803 		return false;
5804 
5805 	return true;
5806 }
5807 
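/*
 * Two hist triggers match if they have the same fields, sort keys,
 * actions and (unless ignore_filter) filters.  Used to detect
 * duplicate triggers and to pair removals with registrations.
 */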
5808 static bool hist_trigger_match(struct event_trigger_data *data,
5809 			       struct event_trigger_data *data_test,
5810 			       struct event_trigger_data *named_data,
5811 			       bool ignore_filter)
5812 {
5813 	struct tracing_map_sort_key *sort_key, *sort_key_test;
5814 	struct hist_trigger_data *hist_data, *hist_data_test;
5815 	struct hist_field *key_field, *key_field_test;
5816 	unsigned int i;
5817 
5818 	if (named_data && (named_data != data_test) &&
5819 	    (named_data != data_test->named_data))
5820 		return false;
5821 
5822 	if (!named_data && is_named_trigger(data_test))
5823 		return false;
5824 
5825 	hist_data = data->private_data;
5826 	hist_data_test = data_test->private_data;
5827 
5828 	if (hist_data->n_vals != hist_data_test->n_vals ||
5829 	    hist_data->n_fields != hist_data_test->n_fields ||
5830 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5831 		return false;
5832 
5833 	if (!ignore_filter) {
5834 		if ((data->filter_str && !data_test->filter_str) ||
5835 		   (!data->filter_str && data_test->filter_str))
5836 			return false;
5837 	}
5838 
5839 	for_each_hist_field(i, hist_data) {
5840 		key_field = hist_data->fields[i];
5841 		key_field_test = hist_data_test->fields[i];
5842 
5843 		if (key_field->flags != key_field_test->flags)
5844 			return false;
5845 		if (!compatible_field(key_field->field, key_field_test->field))
5846 			return false;
5847 		if (key_field->offset != key_field_test->offset)
5848 			return false;
5849 		if (key_field->size != key_field_test->size)
5850 			return false;
5851 		if (key_field->is_signed != key_field_test->is_signed)
5852 			return false;
5853 		if (!!key_field->var.name != !!key_field_test->var.name)
5854 			return false;
5855 		if (key_field->var.name &&
5856 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
5857 			return false;
5858 	}
5859 
5860 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5861 		sort_key = &hist_data->sort_keys[i];
5862 		sort_key_test = &hist_data_test->sort_keys[i];
5863 
5864 		if (sort_key->field_idx != sort_key_test->field_idx ||
5865 		    sort_key->descending != sort_key_test->descending)
5866 			return false;
5867 	}
5868 
5869 	if (!ignore_filter && data->filter_str &&
5870 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
5871 		return false;
5872 
5873 	if (!actions_match(hist_data, hist_data_test))
5874 		return false;
5875 
5876 	return true;
5877 }
5878 
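/*
 * Register a hist trigger on an event file.  An existing matching
 * trigger is paused, continued or cleared depending on the attributes;
 * named triggers share the hist data of the trigger that first
 * defined the name.
 */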
5879 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5880 				 struct event_trigger_data *data,
5881 				 struct trace_event_file *file)
5882 {
5883 	struct hist_trigger_data *hist_data = data->private_data;
5884 	struct event_trigger_data *test, *named_data = NULL;
5885 	struct trace_array *tr = file->tr;
5886 	int ret = 0;
5887 
5888 	if (hist_data->attrs->name) {
5889 		named_data = find_named_trigger(hist_data->attrs->name);
5890 		if (named_data) {
5891 			if (!hist_trigger_match(data, named_data, named_data,
5892 						true)) {
5893 				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5894 				ret = -EINVAL;
5895 				goto out;
5896 			}
5897 		}
5898 	}
5899 
5900 	if (hist_data->attrs->name && !named_data)
5901 		goto new;
5902 
5903 	list_for_each_entry_rcu(test, &file->triggers, list) {
5904 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5905 			if (!hist_trigger_match(data, test, named_data, false))
5906 				continue;
5907 			if (hist_data->attrs->pause)
5908 				test->paused = true;
5909 			else if (hist_data->attrs->cont)
5910 				test->paused = false;
5911 			else if (hist_data->attrs->clear)
5912 				hist_clear(test);
5913 			else {
5914 				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5915 				ret = -EEXIST;
5916 			}
5917 			goto out;
5918 		}
5919 	}
5920  new:
5921 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
5922 		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5923 		ret = -ENOENT;
5924 		goto out;
5925 	}
5926 
5927 	if (hist_data->attrs->pause)
5928 		data->paused = true;
5929 
5930 	if (named_data) {
5931 		data->private_data = named_data->private_data;
5932 		set_named_trigger_data(data, named_data);
5933 		data->ops = &event_hist_trigger_named_ops;
5934 	}
5935 
5936 	if (data->ops->init) {
5937 		ret = data->ops->init(data->ops, data);
5938 		if (ret < 0)
5939 			goto out;
5940 	}
5941 
5942 	if (hist_data->enable_timestamps) {
5943 		char *clock = hist_data->attrs->clock;
5944 
5945 		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5946 		if (ret) {
5947 			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5948 			goto out;
5949 		}
5950 
5951 		tracing_set_time_stamp_abs(file->tr, true);
5952 	}
5953 
5954 	if (named_data)
5955 		destroy_hist_data(hist_data);
5956 
5957 	ret++;
5958  out:
5959 	return ret;
5960 }
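
/*
 * Add @data to @file's trigger list and bump the event's trigger
 * enable count; if enabling fails, the trigger is unlinked again and a
 * negative value is returned (a short summary of the helper below).
 */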
5961 
5962 static int hist_trigger_enable(struct event_trigger_data *data,
5963 			       struct trace_event_file *file)
5964 {
5965 	int ret = 0;
5966 
5967 	list_add_tail_rcu(&data->list, &file->triggers);
5968 
5969 	update_cond_flag(file);
5970 
5971 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
5972 		list_del_rcu(&data->list);
5973 		update_cond_flag(file);
5974 		ret--;
5975 	}
5976 
5977 	return ret;
5978 }
5979 
5980 static bool have_hist_trigger_match(struct event_trigger_data *data,
5981 				    struct trace_event_file *file)
5982 {
5983 	struct hist_trigger_data *hist_data = data->private_data;
5984 	struct event_trigger_data *test, *named_data = NULL;
5985 	bool match = false;
5986 
5987 	if (hist_data->attrs->name)
5988 		named_data = find_named_trigger(hist_data->attrs->name);
5989 
5990 	list_for_each_entry_rcu(test, &file->triggers, list) {
5991 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5992 			if (hist_trigger_match(data, test, named_data, false)) {
5993 				match = true;
5994 				break;
5995 			}
5996 		}
5997 	}
5998 
5999 	return match;
6000 }
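
/*
 * Return true if the hist trigger matching @data on @file defines
 * variables that other triggers still reference; such a trigger
 * can't be removed yet (the caller reports -EBUSY in that case).
 */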
6001 
6002 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6003 				    struct trace_event_file *file)
6004 {
6005 	struct hist_trigger_data *hist_data = data->private_data;
6006 	struct event_trigger_data *test, *named_data = NULL;
6007 
6008 	if (hist_data->attrs->name)
6009 		named_data = find_named_trigger(hist_data->attrs->name);
6010 
6011 	list_for_each_entry_rcu(test, &file->triggers, list) {
6012 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6013 			if (!hist_trigger_match(data, test, named_data, false))
6014 				continue;
6015 			hist_data = test->private_data;
6016 			if (check_var_refs(hist_data))
6017 				return true;
6018 			break;
6019 		}
6020 	}
6021 
6022 	return false;
6023 }
6024 
6025 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
6026 				    struct event_trigger_data *data,
6027 				    struct trace_event_file *file)
6028 {
6029 	struct hist_trigger_data *hist_data = data->private_data;
6030 	struct event_trigger_data *test, *named_data = NULL;
6031 	bool unregistered = false;
6032 
6033 	if (hist_data->attrs->name)
6034 		named_data = find_named_trigger(hist_data->attrs->name);
6035 
6036 	list_for_each_entry_rcu(test, &file->triggers, list) {
6037 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6038 			if (!hist_trigger_match(data, test, named_data, false))
6039 				continue;
6040 			unregistered = true;
6041 			list_del_rcu(&test->list);
6042 			trace_event_trigger_enable_disable(file, 0);
6043 			update_cond_flag(file);
6044 			break;
6045 		}
6046 	}
6047 
6048 	if (unregistered && test->ops->free)
6049 		test->ops->free(test->ops, test);
6050 
6051 	if (hist_data->enable_timestamps) {
6052 		if (!hist_data->remove || unregistered)
6053 			tracing_set_time_stamp_abs(file->tr, false);
6054 	}
6055 }
6056 
6057 static bool hist_file_check_refs(struct trace_event_file *file)
6058 {
6059 	struct hist_trigger_data *hist_data;
6060 	struct event_trigger_data *test;
6061 
6062 	list_for_each_entry_rcu(test, &file->triggers, list) {
6063 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6064 			hist_data = test->private_data;
6065 			if (check_var_refs(hist_data))
6066 				return true;
6067 		}
6068 	}
6069 
6070 	return false;
6071 }
6072 
6073 static void hist_unreg_all(struct trace_event_file *file)
6074 {
6075 	struct event_trigger_data *test, *n;
6076 	struct hist_trigger_data *hist_data;
6077 	struct synth_event *se;
6078 	const char *se_name;
6079 
6080 	lockdep_assert_held(&event_mutex);
6081 
6082 	if (hist_file_check_refs(file))
6083 		return;
6084 
6085 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6086 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6087 			hist_data = test->private_data;
6088 			list_del_rcu(&test->list);
6089 			trace_event_trigger_enable_disable(file, 0);
6090 
6091 			se_name = trace_event_name(file->event_call);
6092 			se = find_synth_event(se_name);
6093 			if (se)
6094 				se->ref--;
6095 
6096 			update_cond_flag(file);
6097 			if (hist_data->enable_timestamps)
6098 				tracing_set_time_stamp_abs(file->tr, false);
6099 			if (test->ops->free)
6100 				test->ops->free(test->ops, test);
6101 		}
6102 	}
6103 }
6104 
6105 static int event_hist_trigger_func(struct event_command *cmd_ops,
6106 				   struct trace_event_file *file,
6107 				   char *glob, char *cmd, char *param)
6108 {
6109 	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6110 	struct event_trigger_data *trigger_data;
6111 	struct hist_trigger_attrs *attrs;
6112 	struct event_trigger_ops *trigger_ops;
6113 	struct hist_trigger_data *hist_data;
6114 	struct synth_event *se;
6115 	const char *se_name;
6116 	bool remove = false;
6117 	char *trigger, *p;
6118 	int ret = 0;
6119 
6120 	lockdep_assert_held(&event_mutex);
6121 
6122 	if (glob && strlen(glob)) {
6123 		hist_err_clear();
6124 		last_cmd_set(file, param);
6125 	}
6126 
6127 	if (!param)
6128 		return -EINVAL;
6129 
6130 	if (glob[0] == '!')
6131 		remove = true;
6132 
6133 	/*
6134 	 * separate the trigger from the filter (k:v [if filter])
6135 	 * allowing for whitespace in the trigger
6136 	 */
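	/*
	 * Illustrative example: a param of
	 *
	 *   keys=pid:vals=hitcount if comm == "bash"
	 *
	 * is split into the trigger "keys=pid:vals=hitcount" and the
	 * filter 'comm == "bash"'; an "if" that is not a standalone,
	 * whitespace-delimited word is left alone.
	 */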
6137 	p = trigger = param;
6138 	do {
6139 		p = strstr(p, "if");
6140 		if (!p)
6141 			break;
6142 		if (p == param)
6143 			return -EINVAL;
6144 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6145 			p++;
6146 			continue;
6147 		}
6148 		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
6149 			return -EINVAL;
6150 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6151 			p++;
6152 			continue;
6153 		}
6154 		break;
6155 	} while (p);
6156 
6157 	if (!p)
6158 		param = NULL;
6159 	else {
6160 		*(p - 1) = '\0';
6161 		param = strstrip(p);
6162 		trigger = strstrip(trigger);
6163 	}
6164 
6165 	attrs = parse_hist_trigger_attrs(file->tr, trigger);
6166 	if (IS_ERR(attrs))
6167 		return PTR_ERR(attrs);
6168 
6169 	if (attrs->map_bits)
6170 		hist_trigger_bits = attrs->map_bits;
6171 
6172 	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6173 	if (IS_ERR(hist_data)) {
6174 		destroy_hist_trigger_attrs(attrs);
6175 		return PTR_ERR(hist_data);
6176 	}
6177 
6178 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6179 
6180 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
6181 	if (!trigger_data) {
6182 		ret = -ENOMEM;
6183 		goto out_free;
6184 	}
6185 
6186 	trigger_data->count = -1;
6187 	trigger_data->ops = trigger_ops;
6188 	trigger_data->cmd_ops = cmd_ops;
6189 
6190 	INIT_LIST_HEAD(&trigger_data->list);
6191 	RCU_INIT_POINTER(trigger_data->filter, NULL);
6192 
6193 	trigger_data->private_data = hist_data;
6194 
6195 	/* if param is non-empty, it's supposed to be a filter */
6196 	if (param && cmd_ops->set_filter) {
6197 		ret = cmd_ops->set_filter(param, trigger_data, file);
6198 		if (ret < 0)
6199 			goto out_free;
6200 	}
6201 
6202 	if (remove) {
6203 		if (!have_hist_trigger_match(trigger_data, file))
6204 			goto out_free;
6205 
6206 		if (hist_trigger_check_refs(trigger_data, file)) {
6207 			ret = -EBUSY;
6208 			goto out_free;
6209 		}
6210 
6211 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6212 		se_name = trace_event_name(file->event_call);
6213 		se = find_synth_event(se_name);
6214 		if (se)
6215 			se->ref--;
6216 		ret = 0;
6217 		goto out_free;
6218 	}
6219 
6220 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6221 	/*
6222 	 * On success, the above returns the number of triggers
6223 	 * registered, but if it didn't register any it returns zero.
6224 	 * Consider no triggers registered a failure too.
6225 	 */
6226 	if (!ret) {
6227 		if (!(attrs->pause || attrs->cont || attrs->clear))
6228 			ret = -ENOENT;
6229 		goto out_free;
6230 	} else if (ret < 0)
6231 		goto out_free;
6232 
6233 	if (get_named_trigger_data(trigger_data))
6234 		goto enable;
6235 
6236 	if (has_hist_vars(hist_data))
6237 		save_hist_vars(hist_data);
6238 
6239 	ret = create_actions(hist_data);
6240 	if (ret)
6241 		goto out_unreg;
6242 
6243 	ret = tracing_map_init(hist_data->map);
6244 	if (ret)
6245 		goto out_unreg;
6246 enable:
6247 	ret = hist_trigger_enable(trigger_data, file);
6248 	if (ret)
6249 		goto out_unreg;
6250 
6251 	se_name = trace_event_name(file->event_call);
6252 	se = find_synth_event(se_name);
6253 	if (se)
6254 		se->ref++;
6255 	/* Just return zero, not the number of registered triggers */
6256 	ret = 0;
6257  out:
6258 	if (ret == 0)
6259 		hist_err_clear();
6260 
6261 	return ret;
6262  out_unreg:
6263 	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6264  out_free:
6265 	if (cmd_ops->set_filter)
6266 		cmd_ops->set_filter(NULL, trigger_data, NULL);
6267 
6268 	remove_hist_vars(hist_data);
6269 
6270 	kfree(trigger_data);
6271 
6272 	destroy_hist_data(hist_data);
6273 	goto out;
6274 }
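
/*
 * The "hist" event command.  Illustrative use from user space (the
 * tracefs mount point may differ):
 *
 *   # echo 'hist:keys=pid:vals=hitcount' > \
 *         /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Writes like this are parsed by event_hist_trigger_func() above and
 * routed through hist_register_trigger()/hist_unregister_trigger().
 */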
6275 
6276 static struct event_command trigger_hist_cmd = {
6277 	.name			= "hist",
6278 	.trigger_type		= ETT_EVENT_HIST,
6279 	.flags			= EVENT_CMD_FL_NEEDS_REC,
6280 	.func			= event_hist_trigger_func,
6281 	.reg			= hist_register_trigger,
6282 	.unreg			= hist_unregister_trigger,
6283 	.unreg_all		= hist_unreg_all,
6284 	.get_trigger_ops	= event_hist_get_trigger_ops,
6285 	.set_filter		= set_trigger_filter,
6286 };
6287 
6288 __init int register_trigger_hist_cmd(void)
6289 {
6290 	int ret;
6291 
6292 	ret = register_event_command(&trigger_hist_cmd);
6293 	WARN_ON(ret < 0);
6294 
6295 	return ret;
6296 }
6297 
6298 static void
6299 hist_enable_trigger(struct event_trigger_data *data, void *rec,
6300 		    struct ring_buffer_event *event)
6301 {
6302 	struct enable_trigger_data *enable_data = data->private_data;
6303 	struct event_trigger_data *test;
6304 
6305 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
6306 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6307 			if (enable_data->enable)
6308 				test->paused = false;
6309 			else
6310 				test->paused = true;
6311 		}
6312 	}
6313 }
6314 
6315 static void
6316 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
6317 			  struct ring_buffer_event *event)
6318 {
6319 	if (!data->count)
6320 		return;
6321 
6322 	if (data->count != -1)
6323 		(data->count)--;
6324 
6325 	hist_enable_trigger(data, rec, event);
6326 }
6327 
6328 static struct event_trigger_ops hist_enable_trigger_ops = {
6329 	.func			= hist_enable_trigger,
6330 	.print			= event_enable_trigger_print,
6331 	.init			= event_trigger_init,
6332 	.free			= event_enable_trigger_free,
6333 };
6334 
6335 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6336 	.func			= hist_enable_count_trigger,
6337 	.print			= event_enable_trigger_print,
6338 	.init			= event_trigger_init,
6339 	.free			= event_enable_trigger_free,
6340 };
6341 
6342 static struct event_trigger_ops hist_disable_trigger_ops = {
6343 	.func			= hist_enable_trigger,
6344 	.print			= event_enable_trigger_print,
6345 	.init			= event_trigger_init,
6346 	.free			= event_enable_trigger_free,
6347 };
6348 
6349 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6350 	.func			= hist_enable_count_trigger,
6351 	.print			= event_enable_trigger_print,
6352 	.init			= event_trigger_init,
6353 	.free			= event_enable_trigger_free,
6354 };
6355 
6356 static struct event_trigger_ops *
6357 hist_enable_get_trigger_ops(char *cmd, char *param)
6358 {
6359 	struct event_trigger_ops *ops;
6360 	bool enable;
6361 
6362 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6363 
6364 	if (enable)
6365 		ops = param ? &hist_enable_count_trigger_ops :
6366 			&hist_enable_trigger_ops;
6367 	else
6368 		ops = param ? &hist_disable_count_trigger_ops :
6369 			&hist_disable_trigger_ops;
6370 
6371 	return ops;
6372 }
6373 
6374 static void hist_enable_unreg_all(struct trace_event_file *file)
6375 {
6376 	struct event_trigger_data *test, *n;
6377 
6378 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6379 		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6380 			list_del_rcu(&test->list);
6381 			update_cond_flag(file);
6382 			trace_event_trigger_enable_disable(file, 0);
6383 			if (test->ops->free)
6384 				test->ops->free(test->ops, test);
6385 		}
6386 	}
6387 }
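
/*
 * The enable_hist/disable_hist commands.  Illustrative use: writing
 *
 *   enable_hist:sched:sched_switch
 *
 * to some other event's trigger file unpauses any hist triggers on
 * sched:sched_switch whenever that event fires; disable_hist pauses
 * them.  An optional trailing ":N" count selects the *_count_trigger
 * ops above, limiting how many times the trigger takes effect.
 */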
6388 
6389 static struct event_command trigger_hist_enable_cmd = {
6390 	.name			= ENABLE_HIST_STR,
6391 	.trigger_type		= ETT_HIST_ENABLE,
6392 	.func			= event_enable_trigger_func,
6393 	.reg			= event_enable_register_trigger,
6394 	.unreg			= event_enable_unregister_trigger,
6395 	.unreg_all		= hist_enable_unreg_all,
6396 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6397 	.set_filter		= set_trigger_filter,
6398 };
6399 
6400 static struct event_command trigger_hist_disable_cmd = {
6401 	.name			= DISABLE_HIST_STR,
6402 	.trigger_type		= ETT_HIST_ENABLE,
6403 	.func			= event_enable_trigger_func,
6404 	.reg			= event_enable_register_trigger,
6405 	.unreg			= event_enable_unregister_trigger,
6406 	.unreg_all		= hist_enable_unreg_all,
6407 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6408 	.set_filter		= set_trigger_filter,
6409 };
6410 
6411 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6412 {
6413 	unregister_event_command(&trigger_hist_enable_cmd);
6414 	unregister_event_command(&trigger_hist_disable_cmd);
6415 }
6416 
6417 __init int register_trigger_hist_enable_disable_cmds(void)
6418 {
6419 	int ret;
6420 
6421 	ret = register_event_command(&trigger_hist_enable_cmd);
6422 	if (WARN_ON(ret < 0))
6423 		return ret;
6424 	ret = register_event_command(&trigger_hist_disable_cmd);
6425 	if (WARN_ON(ret < 0))
6426 		unregister_trigger_hist_enable_disable_cmds();
6427 
6428 	return ret;
6429 }
6430 
6431 static __init int trace_events_hist_init(void)
6432 {
6433 	struct dentry *entry = NULL;
6434 	struct dentry *d_tracer;
6435 	int err = 0;
6436 
6437 	err = dyn_event_register(&synth_event_ops);
6438 	if (err) {
6439 		pr_warn("Could not register synth_event_ops\n");
6440 		return err;
6441 	}
6442 
6443 	d_tracer = tracing_init_dentry();
6444 	if (IS_ERR(d_tracer)) {
6445 		err = PTR_ERR(d_tracer);
6446 		goto err;
6447 	}
6448 
6449 	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
6450 				    NULL, &synth_events_fops);
6451 	if (!entry) {
6452 		err = -ENODEV;
6453 		goto err;
6454 	}
6455 
6456 	return err;
6457  err:
6458 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
6459 
6460 	return err;
6461 }
6462 
6463 fs_initcall(trace_events_hist_init);
6464