xref: /openbmc/linux/kernel/trace/trace_events_hist.c (revision a24d286f36104ed45108a5a36f3868938434772f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24 
/* Subsystem name under which all synthetic events are registered. */
#define SYNTH_SYSTEM		"synthetic"
/* Max number of fields a synthetic event may define. */
#define SYNTH_FIELDS_MAX	32

/* Max bytes of a synthetic string field. */
#define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
29 
/*
 * X-macro table of hist trigger error messages.  Each C(id, msg) entry
 * is expanded twice below: once to build the HIST_ERR_* enum and once
 * to build the parallel err_text[] array, keeping the two in sync.
 */
#define ERRORS								\
	C(NONE,			"No error"),				\
	C(DUPLICATE_VAR,	"Variable already defined"),		\
	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS,	"Too many variables defined"),		\
	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
	C(TOO_MANY_PARAMS,	"Too many action params"),		\
	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
	C(INVALID_PARAM,	"Invalid action param"),		\
	C(ACTION_NOT_FOUND,	"No action found"),			\
	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),

/* First expansion: the HIST_ERR_* enum constants. */
#undef C
#define C(a, b)		HIST_ERR_##a

enum { ERRORS };

/* Second expansion: the message strings, indexed by HIST_ERR_*. */
#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };
85 
struct hist_field;

/*
 * Per-field "compute" callback: produces the u64 value of a hist field
 * given the current map element, ring buffer event and event record.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event);

/* Max operands of an expression node (binary ops use both slots). */
#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8

/* Operators supported in hist trigger expressions. */
enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
};
103 
/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data.  The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map.  The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char				*name;		/* variable name, e.g. "ts0" */
	struct hist_trigger_data	*hist_data;	/* owning hist trigger */
	unsigned int			idx;		/* tracing_map variable index */
};
119 
/*
 * A single node of a hist trigger definition: a key, value, variable,
 * variable reference, or expression node (see enum hist_field_flags).
 */
struct hist_field {
	struct ftrace_event_field	*field;		/* backing trace event field, if any */
	unsigned long			flags;		/* HIST_FIELD_FL_* */
	hist_field_fn_t			fn;		/* callback computing this field's value */
	unsigned int			size;		/* value size in bytes */
	unsigned int			offset;		/* offset within the map key/val area */
	unsigned int                    is_signed;
	const char			*type;		/* type string of the value */
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;	/* owning hist trigger */

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var			var;
	enum field_op_id		operator;	/* set for EXPR nodes */
	char				*system;	/* for cross-event VAR_REFs */
	char				*event_name;	/* for cross-event VAR_REFs */

	/*
	 * The name field is used for EXPR and VAR_REF fields.  VAR
	 * fields contain the variable name in var.name.
	 */
	char				*name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs().  The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int			var_ref_idx;
	bool                            read_once;
};
156 
157 static u64 hist_field_none(struct hist_field *field,
158 			   struct tracing_map_elt *elt,
159 			   struct ring_buffer_event *rbe,
160 			   void *event)
161 {
162 	return 0;
163 }
164 
165 static u64 hist_field_counter(struct hist_field *field,
166 			      struct tracing_map_elt *elt,
167 			      struct ring_buffer_event *rbe,
168 			      void *event)
169 {
170 	return 1;
171 }
172 
173 static u64 hist_field_string(struct hist_field *hist_field,
174 			     struct tracing_map_elt *elt,
175 			     struct ring_buffer_event *rbe,
176 			     void *event)
177 {
178 	char *addr = (char *)(event + hist_field->field->offset);
179 
180 	return (u64)(unsigned long)addr;
181 }
182 
/*
 * Compute callback for dynamic-array string fields: the u32 at the
 * field's offset packs the string's location within the record in its
 * low 16 bits (the high 16 bits presumably hold the length -- not
 * used here).  Returns the address of the string data.
 */
static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;	/* low 16 bits = offset in record */
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}
194 
195 static u64 hist_field_pstring(struct hist_field *hist_field,
196 			      struct tracing_map_elt *elt,
197 			      struct ring_buffer_event *rbe,
198 			      void *event)
199 {
200 	char **addr = (char **)(event + hist_field->field->offset);
201 
202 	return (u64)(unsigned long)*addr;
203 }
204 
/*
 * Compute callback for the .log2 modifier: bucket the operand's value
 * by its base-2 logarithm, i.e. ilog2(roundup_pow_of_two(val)), which
 * is ceil(log2(val)) for val > 1.
 * NOTE(review): roundup_pow_of_two(0) is not well-defined; presumably
 * callers never feed 0 here -- confirm.
 */
static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}
216 
217 static u64 hist_field_plus(struct hist_field *hist_field,
218 			   struct tracing_map_elt *elt,
219 			   struct ring_buffer_event *rbe,
220 			   void *event)
221 {
222 	struct hist_field *operand1 = hist_field->operands[0];
223 	struct hist_field *operand2 = hist_field->operands[1];
224 
225 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
226 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
227 
228 	return val1 + val2;
229 }
230 
231 static u64 hist_field_minus(struct hist_field *hist_field,
232 			    struct tracing_map_elt *elt,
233 			    struct ring_buffer_event *rbe,
234 			    void *event)
235 {
236 	struct hist_field *operand1 = hist_field->operands[0];
237 	struct hist_field *operand2 = hist_field->operands[1];
238 
239 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
240 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
241 
242 	return val1 - val2;
243 }
244 
245 static u64 hist_field_unary_minus(struct hist_field *hist_field,
246 				  struct tracing_map_elt *elt,
247 				  struct ring_buffer_event *rbe,
248 				  void *event)
249 {
250 	struct hist_field *operand = hist_field->operands[0];
251 
252 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
253 	u64 val = (u64)-sval;
254 
255 	return val;
256 }
257 
/*
 * Generate a trivial fetch callback for each basic integer type: read
 * the typed value at the field's offset in the record and widen it to
 * u64.
 * NOTE(review): the (unsigned long) intermediate cast would truncate
 * 64-bit field values on 32-bit kernels -- confirm intended.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
277 
/* Iterate over all fields of a hist trigger (vals first, then keys). */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

/* Iterate over the value fields only, indices [0, n_vals). */
#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

/* Iterate over the key fields, which follow the vals in fields[]. */
#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

/* Stacktrace keys: frames captured, key byte size, and frames skipped. */
#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/* The implicit hitcount value always occupies fields[0]. */
#define HITCOUNT_IDX		0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
293 
/*
 * Flags describing what a hist_field is (key, value, variable,
 * variable reference, expression, ...) and how its value should be
 * interpreted or rendered (.hex, .sym, .log2, .usecs etc. modifiers).
 */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
};
313 
/* Parsed variable definitions: parallel arrays of name and expression. */
struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};
319 
/*
 * Attributes parsed out of a hist trigger command string, before the
 * corresponding hist_trigger_data is built.
 */
struct hist_trigger_attrs {
	char		*keys_str;	/* raw "keys=" clause */
	char		*vals_str;	/* raw "vals=" clause */
	char		*sort_key_str;	/* raw "sort=" clause */
	char		*name;		/* named trigger name, if any */
	char		*clock;		/* requested trace clock */
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;
	unsigned int	map_bits;	/* requested tracing_map size (2^bits) */

	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	char		*action_str[HIST_ACTIONS_MAX];
	unsigned int	n_actions;

	struct var_defs	var_defs;
};
340 
/* A variable automatically created to capture an event field's value. */
struct field_var {
	struct hist_field	*var;	/* the variable itself */
	struct hist_field	*val;	/* the field value it captures */
};

/* A histogram created on another event to service a field variable. */
struct field_var_hist {
	struct hist_trigger_data	*hist_data;
	char				*cmd;	/* command used to create it */
};
350 
/*
 * Everything making up one hist trigger instance: its parsed fields
 * (vals then keys), the backing tracing_map, attributes, actions, and
 * the bookkeeping for variables and variable references.
 */
struct hist_trigger_data {
	struct hist_field               *fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;		/* value fields, at the front of fields[] */
	unsigned int			n_keys;		/* key fields, following the vals */
	unsigned int			n_fields;	/* n_vals + n_keys */
	unsigned int			n_vars;
	unsigned int			key_size;	/* total key size in bytes */
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;	/* event this trigger is attached to */
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;		/* backing key->vals map */
	bool				enable_timestamps;
	bool				remove;		/* trigger is being removed */
	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int			n_var_refs;

	struct action_data		*actions[HIST_ACTIONS_MAX];
	unsigned int			n_actions;

	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_field_vars;
	unsigned int			n_field_var_str;
	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int			n_field_var_hists;

	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_save_vars;
	unsigned int			n_save_var_str;
};
381 
/* dyn_event operation callbacks for synthetic events (defined below). */
static int create_synth_event(int argc, const char **argv);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

/* Hooks registering synthetic events with the dynamic-event framework. */
static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};
396 
/* One field of a synthetic event, parsed from "type name" tokens. */
struct synth_field {
	char *type;
	char *name;
	size_t size;		/* byte size of the type */
	unsigned int offset;	/* offset in u64 slots within the payload */
	bool is_signed;
	bool is_string;		/* char[] field, stored inline */
};

/* A dynamically created synthetic trace event and its trace plumbing. */
struct synth_event {
	struct dyn_event			devent;
	int					ref;		/* users; nonzero means busy */
	char					*name;
	struct synth_field			**fields;
	unsigned int				n_fields;
	unsigned int				n_u64;		/* total u64 slots in the payload */
	struct trace_event_class		class;
	struct trace_event_call			call;
	struct tracepoint			*tp;
	struct module				*mod;
};
418 
419 static bool is_synth_event(struct dyn_event *ev)
420 {
421 	return ev->ops == &synth_event_ops;
422 }
423 
/* Map a generic dyn_event back to its containing synth_event. */
static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}
428 
429 static bool synth_event_is_busy(struct dyn_event *ev)
430 {
431 	struct synth_event *event = to_synth_event(ev);
432 
433 	return event->ref != 0;
434 }
435 
436 static bool synth_event_match(const char *system, const char *event,
437 			int argc, const char **argv, struct dyn_event *ev)
438 {
439 	struct synth_event *sev = to_synth_event(ev);
440 
441 	return strcmp(sev->name, event) == 0 &&
442 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
443 }
444 
struct action_data;

/*
 * An action handler, invoked when a hist trigger with an attached
 * action fires; resolved variable values arrive in var_ref_vals.
 */
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

/* Comparison used by onmax/onchange to decide whether to update. */
typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

/* Which handler clause introduced the action. */
enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

/* What the handler does when it fires. */
enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};
465 
/*
 * One parsed action clause (e.g. onmax($v).save(...)) attached to a
 * hist trigger: the handler/action pair, its parameters, and the
 * handler-specific state in the union below.
 */
struct action_data {
	enum handler_id		handler;	/* onmatch/onmax/onchange */
	enum action_id		action;		/* save/trace/snapshot */
	char			*action_name;	/* action name as written */
	action_fn_t		fn;		/* invoked when the trigger fires */

	unsigned int		n_params;
	char			*params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int		var_ref_idx[TRACING_MAP_VARS_MAX];
	struct synth_event	*synth_event;
	bool			use_trace_keyword;	/* action spelled "trace(...)" */
	char			*synth_event_name;

	union {
		struct {
			char			*event;
			char			*event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char			*var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g onmax($var).
			 */
			struct hist_field	*var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field	*track_var;

			check_track_val_fn_t	check_val;
			action_fn_t		save_data;
		} track_data;
	};
};
524 
/*
 * State carried by an onmax/onchange tracking action: the best value
 * seen so far, the key it was seen under, and a private map element
 * used to stash associated data.
 */
struct track_data {
	u64				track_val;	/* current tracked (e.g. max) value */
	bool				updated;	/* set when track_val was beaten */

	unsigned int			key_len;
	void				*key;		/* copy of the winning key */
	struct tracing_map_elt		elt;

	struct action_data		*action_data;
	struct hist_trigger_data	*hist_data;
};

/* Per-map-element private data attached by hist triggers. */
struct hist_elt_data {
	char *comm;				/* saved task comm, for .execname */
	u64 *var_ref_vals;
	char *field_var_str[SYNTH_FIELDS_MAX];
};

/* Captured element/key pair used when taking a snapshot. */
struct snapshot_context {
	struct tracing_map_elt	*elt;
	void			*key;
};
547 
548 static void track_data_free(struct track_data *track_data)
549 {
550 	struct hist_elt_data *elt_data;
551 
552 	if (!track_data)
553 		return;
554 
555 	kfree(track_data->key);
556 
557 	elt_data = track_data->elt.private_data;
558 	if (elt_data) {
559 		kfree(elt_data->comm);
560 		kfree(elt_data);
561 	}
562 
563 	kfree(track_data);
564 }
565 
566 static struct track_data *track_data_alloc(unsigned int key_len,
567 					   struct action_data *action_data,
568 					   struct hist_trigger_data *hist_data)
569 {
570 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
571 	struct hist_elt_data *elt_data;
572 
573 	if (!data)
574 		return ERR_PTR(-ENOMEM);
575 
576 	data->key = kzalloc(key_len, GFP_KERNEL);
577 	if (!data->key) {
578 		track_data_free(data);
579 		return ERR_PTR(-ENOMEM);
580 	}
581 
582 	data->key_len = key_len;
583 	data->action_data = action_data;
584 	data->hist_data = hist_data;
585 
586 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
587 	if (!elt_data) {
588 		track_data_free(data);
589 		return ERR_PTR(-ENOMEM);
590 	}
591 	data->elt.private_data = elt_data;
592 
593 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
594 	if (!elt_data->comm) {
595 		track_data_free(data);
596 		return ERR_PTR(-ENOMEM);
597 	}
598 
599 	return data;
600 }
601 
/* Last hist command text and its location, saved for error reporting. */
static char last_cmd[MAX_FILTER_STR_VAL];
static char last_cmd_loc[MAX_FILTER_STR_VAL];

/* Offset of @str within the saved command, for error caret placement. */
static int errpos(char *str)
{
	return err_pos(last_cmd, str);
}
609 
/*
 * Save the command string (prefixed with "hist:") and, when the event
 * file is known, a "hist:system:event" location string, for use by
 * later error reports.
 */
static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	strcpy(last_cmd, "hist:");
	/*
	 * NOTE(review): this bound reserves sizeof("hist:") == 6 bytes
	 * plus one -- one byte more conservative than strictly needed,
	 * but safely within last_cmd.
	 */
	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));

	if (file) {
		call = file->event_call;

		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			/* Only record a location if both parts exist. */
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
}
635 
/* Log a hist trigger error against the last-recorded command string. */
static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
{
	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}
641 
642 static void hist_err_clear(void)
643 {
644 	last_cmd[0] = '\0';
645 	last_cmd_loc[0] = '\0';
646 }
647 
/*
 * Ring buffer layout of a synthetic event: a standard trace entry
 * header followed by the packed u64 field slots.
 */
struct synth_trace_event {
	struct trace_entry	ent;
	u64			fields[];
};
652 
/*
 * Register the synthetic event's fields with the trace event core.
 * trace_define_field() is given byte offsets within the record, while
 * each field's own ->offset is recorded in u64-slot units; string
 * fields occupy STR_VAR_LEN_MAX bytes, everything else one u64 slot.
 * Returns 0 or the first trace_define_field() error.
 */
static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		/* Field offset is recorded in u64 slots, not bytes. */
		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}
688 
/*
 * Whether a synthetic field type is signed: anything starting with
 * "u" (u8..u64, unsigned) and gfp_t are unsigned; all else is signed.
 */
static bool synth_field_signed(char *type)
{
	if (str_has_prefix(type, "u") || strcmp(type, "gfp_t") == 0)
		return false;

	return true;
}
698 
/* A synthetic field is a string iff its type contains "char[". */
static int synth_field_is_string(char *type)
{
	return strstr(type, "char[") != NULL;
}
706 
/*
 * Parse the array length out of a "char[N]" type string.  Returns the
 * size N, or -EINVAL for malformed types, N with more than 3 digits,
 * or N > STR_VAR_LEN_MAX.
 * NOTE(review): kstrtouint() takes an unsigned int *; `size` here is
 * declared int -- confirm this doesn't trip pointer-type warnings.
 */
static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;
	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	if (!end || end < start)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;	/* at most 3 digits fit in buf */

	strncpy(buf, start, len);
	buf[len] = '\0';

	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}
738 
739 static int synth_field_size(char *type)
740 {
741 	int size = 0;
742 
743 	if (strcmp(type, "s64") == 0)
744 		size = sizeof(s64);
745 	else if (strcmp(type, "u64") == 0)
746 		size = sizeof(u64);
747 	else if (strcmp(type, "s32") == 0)
748 		size = sizeof(s32);
749 	else if (strcmp(type, "u32") == 0)
750 		size = sizeof(u32);
751 	else if (strcmp(type, "s16") == 0)
752 		size = sizeof(s16);
753 	else if (strcmp(type, "u16") == 0)
754 		size = sizeof(u16);
755 	else if (strcmp(type, "s8") == 0)
756 		size = sizeof(s8);
757 	else if (strcmp(type, "u8") == 0)
758 		size = sizeof(u8);
759 	else if (strcmp(type, "char") == 0)
760 		size = sizeof(char);
761 	else if (strcmp(type, "unsigned char") == 0)
762 		size = sizeof(unsigned char);
763 	else if (strcmp(type, "int") == 0)
764 		size = sizeof(int);
765 	else if (strcmp(type, "unsigned int") == 0)
766 		size = sizeof(unsigned int);
767 	else if (strcmp(type, "long") == 0)
768 		size = sizeof(long);
769 	else if (strcmp(type, "unsigned long") == 0)
770 		size = sizeof(unsigned long);
771 	else if (strcmp(type, "pid_t") == 0)
772 		size = sizeof(pid_t);
773 	else if (strcmp(type, "gfp_t") == 0)
774 		size = sizeof(gfp_t);
775 	else if (synth_field_is_string(type))
776 		size = synth_field_string_size(type);
777 
778 	return size;
779 }
780 
/*
 * printf format specifier for a synthetic field type.  Known scalar
 * types come from a table, strings use "%s", and anything unknown
 * falls back to "%llu".
 */
static const char *synth_field_fmt(char *type)
{
	static const struct {
		const char *name;
		const char *fmt;
	} fmt_table[] = {
		{ "s64",		"%lld" },
		{ "u64",		"%llu" },
		{ "s32",		"%d" },
		{ "u32",		"%u" },
		{ "s16",		"%d" },
		{ "u16",		"%u" },
		{ "s8",			"%d" },
		{ "u8",			"%u" },
		{ "char",		"%d" },
		{ "unsigned char",	"%u" },
		{ "int",		"%d" },
		{ "unsigned int",	"%u" },
		{ "long",		"%ld" },
		{ "unsigned long",	"%lu" },
		{ "pid_t",		"%d" },
		{ "gfp_t",		"%x" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(fmt_table) / sizeof(fmt_table[0]); i++) {
		if (strcmp(type, fmt_table[i].name) == 0)
			return fmt_table[i].fmt;
	}

	if (synth_field_is_string(type))
		return "%s";

	return "%llu";
}
822 
823 static void print_synth_event_num_val(struct trace_seq *s,
824 				      char *print_fmt, char *name,
825 				      int size, u64 val, char *space)
826 {
827 	switch (size) {
828 	case 1:
829 		trace_seq_printf(s, print_fmt, name, (u8)val, space);
830 		break;
831 
832 	case 2:
833 		trace_seq_printf(s, print_fmt, name, (u16)val, space);
834 		break;
835 
836 	case 4:
837 		trace_seq_printf(s, print_fmt, name, (u32)val, space);
838 		break;
839 
840 	default:
841 		trace_seq_printf(s, print_fmt, name, val, space);
842 		break;
843 	}
844 }
845 
/*
 * .trace output callback: render one synthetic event record as
 * "name: field=val field=val ...".  In verbose mode each value is
 * preceded by its format specifier; gfp_t values additionally get
 * their symbolic flag names appended.
 */
static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		/* Build "name=<fmt><space>" for this field. */
		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			trace_seq_printf(s, print_fmt, se->fields[i]->name,
					 (char *)&entry->fields[n_u64],
					 i == se->n_fields - 1 ? "" : " ");
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			struct trace_print_flags __flags[] = {
			    __def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");

			print_synth_event_num_val(s, print_fmt,
						  se->fields[i]->name,
						  se->fields[i]->size,
						  entry->fields[n_u64],
						  space);

			/* Decode gfp flags symbolically, e.g. " (GFP_KERNEL)". */
			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
				trace_seq_puts(s, " (");
				trace_print_flags_seq(s, "|",
						      entry->fields[n_u64],
						      __flags);
				trace_seq_putc(s, ')');
			}
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
907 
/* Output callbacks for synthetic events; only .trace is provided. */
static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};
911 
/*
 * Probe registered on a synthetic event's tracepoint: marshal the
 * resolved variable values (var_ref_vals, indexed via var_ref_idx,
 * one index per synthetic event field) into a synth_trace_event
 * record and commit it to the ring buffer.
 */
static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	unsigned int i, n_u64, val_idx;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			/* String values arrive as pointers in var_ref_vals. */
			char *str_val = (char *)(long)var_ref_vals[val_idx];
			char *str_field = (char *)&entry->fields[n_u64];

			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			/* Store narrowed to the field's declared size. */
			switch (field->size) {
			case 1:
				*(u8 *)&entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&entry->fields[n_u64] = (u32)val;
				break;

			default:
				entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}
980 
981 static void free_synth_event_print_fmt(struct trace_event_call *call)
982 {
983 	if (call) {
984 		kfree(call->print_fmt);
985 		call->print_fmt = NULL;
986 	}
987 }
988 
/*
 * Generate the print_fmt string for a synthetic event:
 * "name=<fmt>, ..." followed by ", REC->name" argument references.
 * Called twice: first with len == 0 to measure, then with a buffer.
 * Returns the number of characters the full string requires.
 */
static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < event->n_fields; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
1018 
1019 static int set_synth_event_print_fmt(struct trace_event_call *call)
1020 {
1021 	struct synth_event *event = call->data;
1022 	char *print_fmt;
1023 	int len;
1024 
1025 	/* First: called with 0 length to calculate the needed length */
1026 	len = __set_synth_event_print_fmt(event, NULL, 0);
1027 
1028 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
1029 	if (!print_fmt)
1030 		return -ENOMEM;
1031 
1032 	/* Second: actually write the @print_fmt */
1033 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
1034 	call->print_fmt = print_fmt;
1035 
1036 	return 0;
1037 }
1038 
/* Free a synth_field and the type/name strings it owns. */
static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}
1045 
1046 static struct synth_field *parse_synth_field(int argc, const char **argv,
1047 					     int *consumed)
1048 {
1049 	struct synth_field *field;
1050 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
1051 	int len, ret = 0;
1052 
1053 	if (field_type[0] == ';')
1054 		field_type++;
1055 
1056 	if (!strcmp(field_type, "unsigned")) {
1057 		if (argc < 3)
1058 			return ERR_PTR(-EINVAL);
1059 		prefix = "unsigned ";
1060 		field_type = argv[1];
1061 		field_name = argv[2];
1062 		*consumed = 3;
1063 	} else {
1064 		field_name = argv[1];
1065 		*consumed = 2;
1066 	}
1067 
1068 	field = kzalloc(sizeof(*field), GFP_KERNEL);
1069 	if (!field)
1070 		return ERR_PTR(-ENOMEM);
1071 
1072 	len = strlen(field_name);
1073 	array = strchr(field_name, '[');
1074 	if (array)
1075 		len -= strlen(array);
1076 	else if (field_name[len - 1] == ';')
1077 		len--;
1078 
1079 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1080 	if (!field->name) {
1081 		ret = -ENOMEM;
1082 		goto free;
1083 	}
1084 
1085 	if (field_type[0] == ';')
1086 		field_type++;
1087 	len = strlen(field_type) + 1;
1088 	if (array)
1089 		len += strlen(array);
1090 	if (prefix)
1091 		len += strlen(prefix);
1092 
1093 	field->type = kzalloc(len, GFP_KERNEL);
1094 	if (!field->type) {
1095 		ret = -ENOMEM;
1096 		goto free;
1097 	}
1098 	if (prefix)
1099 		strcat(field->type, prefix);
1100 	strcat(field->type, field_type);
1101 	if (array) {
1102 		strcat(field->type, array);
1103 		if (field->type[len - 1] == ';')
1104 			field->type[len - 1] = '\0';
1105 	}
1106 
1107 	field->size = synth_field_size(field->type);
1108 	if (!field->size) {
1109 		ret = -EINVAL;
1110 		goto free;
1111 	}
1112 
1113 	if (synth_field_is_string(field->type))
1114 		field->is_string = true;
1115 
1116 	field->is_signed = synth_field_signed(field->type);
1117 
1118  out:
1119 	return field;
1120  free:
1121 	free_synth_field(field);
1122 	field = ERR_PTR(ret);
1123 	goto out;
1124 }
1125 
1126 static void free_synth_tracepoint(struct tracepoint *tp)
1127 {
1128 	if (!tp)
1129 		return;
1130 
1131 	kfree(tp->name);
1132 	kfree(tp);
1133 }
1134 
1135 static struct tracepoint *alloc_synth_tracepoint(char *name)
1136 {
1137 	struct tracepoint *tp;
1138 
1139 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1140 	if (!tp)
1141 		return ERR_PTR(-ENOMEM);
1142 
1143 	tp->name = kstrdup(name, GFP_KERNEL);
1144 	if (!tp->name) {
1145 		kfree(tp);
1146 		return ERR_PTR(-ENOMEM);
1147 	}
1148 
1149 	return tp;
1150 }
1151 
/* Signature of the probe functions attached to a synthetic event's tracepoint */
typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int *var_ref_idx);
1154 
/*
 * Fire the probes attached to a synthetic event's tracepoint, passing
 * along the resolved variable reference values.  This mirrors the
 * static tracepoint dispatch pattern, hand-rolled because the
 * tracepoint here is allocated dynamically.
 */
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	/* key.enabled > 0 means at least one probe is registered */
	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		/* Don't fire probes on a CPU that isn't online */
		if (!(cpu_online(raw_smp_processor_id())))
			return;

		/* funcs is a NULL-func-terminated array of probe entries */
		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}
1178 
1179 static struct synth_event *find_synth_event(const char *name)
1180 {
1181 	struct dyn_event *pos;
1182 	struct synth_event *event;
1183 
1184 	for_each_dyn_event(pos) {
1185 		if (!is_synth_event(pos))
1186 			continue;
1187 		event = to_synth_event(pos);
1188 		if (strcmp(event->name, name) == 0)
1189 			return event;
1190 	}
1191 
1192 	return NULL;
1193 }
1194 
/*
 * Register a synthetic event with the trace event subsystem: allocate
 * its class system name and tracepoint, register its trace event type
 * and event call, and build its print format string.  On failure the
 * caller frees the event, which also releases the system name and
 * tracepoint allocated here.
 */
static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->define_fields = synth_event_define_fields;

	/* register_trace_event() returns the event type id; 0 means failure */
	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	if (ret < 0) {
		/* Undo trace_add_event_call() before the common unregister */
		trace_remove_event_call(call);
		goto err;
	}
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}
1247 
1248 static int unregister_synth_event(struct synth_event *event)
1249 {
1250 	struct trace_event_call *call = &event->call;
1251 	int ret;
1252 
1253 	ret = trace_remove_event_call(call);
1254 
1255 	return ret;
1256 }
1257 
/*
 * Free a synthetic event and everything it owns: its parsed fields,
 * name, class system string, tracepoint and print format.  NULL-safe.
 */
static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	/* Free the individual fields before the array that holds them */
	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}
1275 
1276 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1277 					     struct synth_field **fields)
1278 {
1279 	struct synth_event *event;
1280 	unsigned int i;
1281 
1282 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1283 	if (!event) {
1284 		event = ERR_PTR(-ENOMEM);
1285 		goto out;
1286 	}
1287 
1288 	event->name = kstrdup(name, GFP_KERNEL);
1289 	if (!event->name) {
1290 		kfree(event);
1291 		event = ERR_PTR(-ENOMEM);
1292 		goto out;
1293 	}
1294 
1295 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1296 	if (!event->fields) {
1297 		free_synth_event(event);
1298 		event = ERR_PTR(-ENOMEM);
1299 		goto out;
1300 	}
1301 
1302 	dyn_event_init(&event->devent, &synth_event_ops);
1303 
1304 	for (i = 0; i < n_fields; i++)
1305 		event->fields[i] = fields[i];
1306 
1307 	event->n_fields = n_fields;
1308  out:
1309 	return event;
1310 }
1311 
1312 static void action_trace(struct hist_trigger_data *hist_data,
1313 			 struct tracing_map_elt *elt, void *rec,
1314 			 struct ring_buffer_event *rbe, void *key,
1315 			 struct action_data *data, u64 *var_ref_vals)
1316 {
1317 	struct synth_event *event = data->synth_event;
1318 
1319 	trace_synth(event, var_ref_vals, data->var_ref_idx);
1320 }
1321 
/* List node associating a hist_trigger_data with a linked list of users */
struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};
1326 
1327 static int synth_event_check_arg_fn(void *data)
1328 {
1329 	struct dynevent_arg_pair *arg_pair = data;
1330 	int size;
1331 
1332 	size = synth_field_size((char *)arg_pair->lhs);
1333 
1334 	return size ? 0 : -EINVAL;
1335 }
1336 
1337 /**
1338  * synth_event_add_field - Add a new field to a synthetic event cmd
1339  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1340  * @type: The type of the new field to add
1341  * @name: The name of the new field to add
1342  *
1343  * Add a new field to a synthetic event cmd object.  Field ordering is in
1344  * the same order the fields are added.
1345  *
1346  * See synth_field_size() for available types. If field_name contains
1347  * [n] the field is considered to be an array.
1348  *
1349  * Return: 0 if successful, error otherwise.
1350  */
1351 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1352 			  const char *name)
1353 {
1354 	struct dynevent_arg_pair arg_pair;
1355 	int ret;
1356 
1357 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1358 		return -EINVAL;
1359 
1360 	if (!type || !name)
1361 		return -EINVAL;
1362 
1363 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1364 
1365 	arg_pair.lhs = type;
1366 	arg_pair.rhs = name;
1367 
1368 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1369 	if (ret)
1370 		return ret;
1371 
1372 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1373 		ret = -EINVAL;
1374 
1375 	return ret;
1376 }
1377 EXPORT_SYMBOL_GPL(synth_event_add_field);
1378 
1379 /**
1380  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1381  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1382  * @type_name: The type and name of the new field to add, as a single string
1383  *
1384  * Add a new field to a synthetic event cmd object, as a single
1385  * string.  The @type_name string is expected to be of the form 'type
1386  * name', which will be appended by ';'.  No sanity checking is done -
1387  * what's passed in is assumed to already be well-formed.  Field
1388  * ordering is in the same order the fields are added.
1389  *
1390  * See synth_field_size() for available types. If field_name contains
1391  * [n] the field is considered to be an array.
1392  *
1393  * Return: 0 if successful, error otherwise.
1394  */
1395 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1396 {
1397 	struct dynevent_arg arg;
1398 	int ret;
1399 
1400 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1401 		return -EINVAL;
1402 
1403 	if (!type_name)
1404 		return -EINVAL;
1405 
1406 	dynevent_arg_init(&arg, ';');
1407 
1408 	arg.str = type_name;
1409 
1410 	ret = dynevent_arg_add(cmd, &arg, NULL);
1411 	if (ret)
1412 		return ret;
1413 
1414 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1415 		ret = -EINVAL;
1416 
1417 	return ret;
1418 }
1419 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1420 
1421 /**
1422  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1423  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1424  * @fields: An array of type/name field descriptions
1425  * @n_fields: The number of field descriptions contained in the fields array
1426  *
1427  * Add a new set of fields to a synthetic event cmd object.  The event
1428  * fields that will be defined for the event should be passed in as an
1429  * array of struct synth_field_desc, and the number of elements in the
1430  * array passed in as n_fields.  Field ordering will retain the
1431  * ordering given in the fields array.
1432  *
1433  * See synth_field_size() for available types. If field_name contains
1434  * [n] the field is considered to be an array.
1435  *
1436  * Return: 0 if successful, error otherwise.
1437  */
1438 int synth_event_add_fields(struct dynevent_cmd *cmd,
1439 			   struct synth_field_desc *fields,
1440 			   unsigned int n_fields)
1441 {
1442 	unsigned int i;
1443 	int ret = 0;
1444 
1445 	for (i = 0; i < n_fields; i++) {
1446 		if (fields[i].type == NULL || fields[i].name == NULL) {
1447 			ret = -EINVAL;
1448 			break;
1449 		}
1450 
1451 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1452 		if (ret)
1453 			break;
1454 	}
1455 
1456 	return ret;
1457 }
1458 EXPORT_SYMBOL_GPL(synth_event_add_fields);
1459 
1460 /**
1461  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1462  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1463  * @name: The name of the synthetic event
1464  * @mod: The module creating the event, NULL if not created from a module
1465  * @args: Variable number of arg (pairs), one pair for each field
1466  *
1467  * NOTE: Users normally won't want to call this function directly, but
1468  * rather use the synth_event_gen_cmd_start() wrapper, which
1469  * automatically adds a NULL to the end of the arg list.  If this
1470  * function is used directly, make sure the last arg in the variable
1471  * arg list is NULL.
1472  *
1473  * Generate a synthetic event command to be executed by
1474  * synth_event_gen_cmd_end().  This function can be used to generate
1475  * the complete command or only the first part of it; in the latter
1476  * case, synth_event_add_field(), synth_event_add_field_str(), or
1477  * synth_event_add_fields() can be used to add more fields following
1478  * this.
1479  *
1480  * There should be an even number variable args, each pair consisting
1481  * of a type followed by a field name.
1482  *
1483  * See synth_field_size() for available types. If field_name contains
1484  * [n] the field is considered to be an array.
1485  *
1486  * Return: 0 if successful, error otherwise.
1487  */
1488 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1489 				struct module *mod, ...)
1490 {
1491 	struct dynevent_arg arg;
1492 	va_list args;
1493 	int ret;
1494 
1495 	cmd->event_name = name;
1496 	cmd->private_data = mod;
1497 
1498 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1499 		return -EINVAL;
1500 
1501 	dynevent_arg_init(&arg, 0);
1502 	arg.str = name;
1503 	ret = dynevent_arg_add(cmd, &arg, NULL);
1504 	if (ret)
1505 		return ret;
1506 
1507 	va_start(args, mod);
1508 	for (;;) {
1509 		const char *type, *name;
1510 
1511 		type = va_arg(args, const char *);
1512 		if (!type)
1513 			break;
1514 		name = va_arg(args, const char *);
1515 		if (!name)
1516 			break;
1517 
1518 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1519 			ret = -EINVAL;
1520 			break;
1521 		}
1522 
1523 		ret = synth_event_add_field(cmd, type, name);
1524 		if (ret)
1525 			break;
1526 	}
1527 	va_end(args);
1528 
1529 	return ret;
1530 }
1531 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
1532 
/**
 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end().  This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * The event fields that will be defined for the event should be
 * passed in as an array of struct synth_field_desc, and the number of
 * elements in the array passed in as n_fields.  Field ordering will
 * retain the ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
				    struct module *mod,
				    struct synth_field_desc *fields,
				    unsigned int n_fields)
{
	struct dynevent_arg arg;
	unsigned int i;
	int ret = 0;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (n_fields > SYNTH_FIELDS_MAX)
		return -EINVAL;

	/* The event name is the first arg of the command */
	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL)
			return -EINVAL;

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1593 
/*
 * Create a synthetic event from an already-tokenized command line:
 * @name is the event name and @argv holds the field descriptions.
 * The '!' (delete) form is handled by the caller.
 */
static int __create_synth_event(int argc, const char *name, const char **argv)
{
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	struct synth_event *event = NULL;
	int i, consumed = 0, n_fields = 0, ret = 0;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0' || argc < 1)
		return -EINVAL;

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		ret = -EEXIST;
		goto out;
	}

	/* Parse field descriptions; each may consume multiple args */
	for (i = 0; i < argc - 1; i++) {
		if (strcmp(argv[i], ";") == 0)
			continue;
		if (n_fields == SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			goto err;
		}

		field = parse_synth_field(argc - i, &argv[i], &consumed);
		if (IS_ERR(field)) {
			ret = PTR_ERR(field);
			goto err;
		}
		fields[n_fields++] = field;
		i += consumed - 1;
	}

	/* The only argument allowed to remain is a trailing ';' */
	if (i < argc && strcmp(argv[i], ";") != 0) {
		ret = -EINVAL;
		goto err;
	}

	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	return ret;
 err:
	/* The fields are only owned by the event once alloc succeeds */
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}
1661 
/**
 * synth_event_create - Create a new synthetic event
 * @name: The name of the new synthetic event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 * @mod: The module creating the event, NULL if not created from a module
 *
 * Create a new synthetic event with the given name under the
 * trace/events/synthetic/ directory.  The event fields that will be
 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number elements in the array passed in as
 * n_fields. Field ordering will retain the ordering given in the
 * fields array.
 *
 * If the new synthetic event is being created from a module, the mod
 * param must be non-NULL.  This will ensure that the trace buffer
 * won't contain unreadable events.
 *
 * The new synth event should be deleted using synth_event_delete()
 * function.  The new synthetic event can be generated from modules or
 * other kernel code using trace_synth_event() and related functions.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
		       unsigned int n_fields, struct module *mod)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Build the complete command text from the field array ... */
	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
					      fields, n_fields);
	if (ret)
		goto out;

	/* ... then execute it to actually register the event */
	ret = synth_event_gen_cmd_end(&cmd);
 out:
	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_create);
1711 
1712 static int destroy_synth_event(struct synth_event *se)
1713 {
1714 	int ret;
1715 
1716 	if (se->ref)
1717 		ret = -EBUSY;
1718 	else {
1719 		ret = unregister_synth_event(se);
1720 		if (!ret) {
1721 			dyn_event_remove(&se->devent);
1722 			free_synth_event(se);
1723 		}
1724 	}
1725 
1726 	return ret;
1727 }
1728 
/**
 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the new synthetic event
 *
 * Delete a synthetic event that was created with synth_event_create().
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	/* Buffer reset is done outside event_mutex, under trace_types_lock */
	if (mod) {
		mutex_lock(&trace_types_lock);
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
		mutex_unlock(&trace_types_lock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);
1770 
1771 static int create_or_delete_synth_event(int argc, char **argv)
1772 {
1773 	const char *name = argv[0];
1774 	int ret;
1775 
1776 	/* trace_run_command() ensures argc != 0 */
1777 	if (name[0] == '!') {
1778 		ret = synth_event_delete(name + 1);
1779 		return ret;
1780 	}
1781 
1782 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1783 	return ret == -ECANCELED ? -EINVAL : ret;
1784 }
1785 
1786 static int synth_event_run_command(struct dynevent_cmd *cmd)
1787 {
1788 	struct synth_event *se;
1789 	int ret;
1790 
1791 	ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
1792 	if (ret)
1793 		return ret;
1794 
1795 	se = find_synth_event(cmd->event_name);
1796 	if (WARN_ON(!se))
1797 		return -ENOENT;
1798 
1799 	se->mod = cmd->private_data;
1800 
1801 	return ret;
1802 }
1803 
/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object.  Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	/* Commands built on @cmd are executed via synth_event_run_command() */
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1819 
/*
 * Common setup for tracing a synthetic event: reserve space for the
 * entry in the ring buffer and record everything needed to fill it in
 * and commit it later in @trace_state.  Returns -ENOENT (not a real
 * error) if the event is currently disabled.
 */
static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	memset(trace_state, '\0', sizeof(*trace_state));

	/*
	 * Normal event tracing doesn't get called at all unless the
	 * ENABLED bit is set (which attaches the probe thus allowing
	 * this code to be called, etc).  Because this is called
	 * directly by the user, we don't have that but we still need
	 * to honor not logging when disabled.  For the iterated
	 * trace case, we save the enabled state upon start and just
	 * ignore the following data calls.
	 */
	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file)) {
		trace_state->disabled = true;
		ret = -ENOENT;
		goto out;
	}

	trace_state->event = file->event_call->data;

	/* Payload after the entry header: one u64 slot per field value */
	fields_size = trace_state->event->n_u64 * sizeof(u64);

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		/* Reservation failed: undo the nesting taken above */
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}
out:
	return ret;
}
1867 
/*
 * Commit the reserved synthetic event entry and drop the ring buffer
 * nesting taken in __synth_event_trace_start().
 */
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}
1875 
/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @args: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list of 'n_vals' u64 values.  The
 * number of vals must match the number of fields in the synthetic
 * event, and must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	struct synth_event_trace_state state;
	unsigned int i, n_u64;
	va_list args;
	int ret;

	ret = __synth_event_trace_start(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			/*
			 * NOTE(review): str_val is not NULL-checked here,
			 * unlike in __synth_event_add_val() -- confirm
			 * callers never pass a NULL string pointer.
			 */
			char *str_val = (char *)(long)val;
			char *str_field = (char *)&state.entry->fields[n_u64];

			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
			/* Strings occupy a fixed STR_VAR_LEN_MAX-byte slot */
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			struct synth_field *field = state.event->fields[i];

			/* Store the value with the field's native width */
			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);
1956 
/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64.  The number of
 * vals must match the number of fields in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64.  Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	struct synth_event_trace_state state;
	unsigned int i, n_u64;
	int ret;

	ret = __synth_event_trace_start(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			/*
			 * NOTE(review): str_val is not NULL-checked here,
			 * unlike in __synth_event_add_val() -- confirm
			 * callers never pass a NULL string pointer.
			 */
			char *str_val = (char *)(long)vals[i];
			char *str_field = (char *)&state.entry->fields[n_u64];

			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
			/* Strings occupy a fixed STR_VAR_LEN_MAX-byte slot */
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			struct synth_field *field = state.event->fields[i];
			u64 val = vals[i];

			/* Store the value with the field's native width */
			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_array);
2031 
2032 /**
2033  * synth_event_trace_start - Start piecewise synthetic event trace
2034  * @file: The trace_event_file representing the synthetic event
2035  * @trace_state: A pointer to object tracking the piecewise trace state
2036  *
2037  * Start the trace of a synthetic event field-by-field rather than all
2038  * at once.
2039  *
2040  * This function 'opens' an event trace, which means space is reserved
2041  * for the event in the trace buffer, after which the event's
2042  * individual field values can be set through either
2043  * synth_event_add_next_val() or synth_event_add_val().
2044  *
2045  * A pointer to a trace_state object is passed in, which will keep
2046  * track of the current event trace state until the event trace is
2047  * closed (and the event finally traced) using
2048  * synth_event_trace_end().
2049  *
2050  * Note that synth_event_trace_end() must be called after all values
2051  * have been added for each event trace, regardless of whether adding
2052  * all field values succeeded or not.
2053  *
2054  * Note also that for a given event trace, all fields must be added
2055  * using either synth_event_add_next_val() or synth_event_add_val()
2056  * but not both together or interleaved.
2057  *
2058  * Return: 0 on success, err otherwise.
2059  */
2060 int synth_event_trace_start(struct trace_event_file *file,
2061 			    struct synth_event_trace_state *trace_state)
2062 {
2063 	int ret;
2064 
2065 	if (!trace_state)
2066 		return -EINVAL;
2067 
2068 	ret = __synth_event_trace_start(file, trace_state);
2069 	if (ret == -ENOENT)
2070 		ret = 0; /* just disabled, not really an error */
2071 
2072 	return ret;
2073 }
2074 EXPORT_SYMBOL_GPL(synth_event_trace_start);
2075 
2076 static int __synth_event_add_val(const char *field_name, u64 val,
2077 				 struct synth_event_trace_state *trace_state)
2078 {
2079 	struct synth_field *field = NULL;
2080 	struct synth_trace_event *entry;
2081 	struct synth_event *event;
2082 	int i, ret = 0;
2083 
2084 	if (!trace_state) {
2085 		ret = -EINVAL;
2086 		goto out;
2087 	}
2088 
2089 	/* can't mix add_next_synth_val() with add_synth_val() */
2090 	if (field_name) {
2091 		if (trace_state->add_next) {
2092 			ret = -EINVAL;
2093 			goto out;
2094 		}
2095 		trace_state->add_name = true;
2096 	} else {
2097 		if (trace_state->add_name) {
2098 			ret = -EINVAL;
2099 			goto out;
2100 		}
2101 		trace_state->add_next = true;
2102 	}
2103 
2104 	if (trace_state->disabled)
2105 		goto out;
2106 
2107 	event = trace_state->event;
2108 	if (trace_state->add_name) {
2109 		for (i = 0; i < event->n_fields; i++) {
2110 			field = event->fields[i];
2111 			if (strcmp(field->name, field_name) == 0)
2112 				break;
2113 		}
2114 		if (!field) {
2115 			ret = -EINVAL;
2116 			goto out;
2117 		}
2118 	} else {
2119 		if (trace_state->cur_field >= event->n_fields) {
2120 			ret = -EINVAL;
2121 			goto out;
2122 		}
2123 		field = event->fields[trace_state->cur_field++];
2124 	}
2125 
2126 	entry = trace_state->entry;
2127 	if (field->is_string) {
2128 		char *str_val = (char *)(long)val;
2129 		char *str_field;
2130 
2131 		if (!str_val) {
2132 			ret = -EINVAL;
2133 			goto out;
2134 		}
2135 
2136 		str_field = (char *)&entry->fields[field->offset];
2137 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2138 	} else {
2139 		switch (field->size) {
2140 		case 1:
2141 			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
2142 			break;
2143 
2144 		case 2:
2145 			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
2146 			break;
2147 
2148 		case 4:
2149 			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
2150 			break;
2151 
2152 		default:
2153 			trace_state->entry->fields[field->offset] = val;
2154 			break;
2155 		}
2156 	}
2157  out:
2158 	return ret;
2159 }
2160 
/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64.  If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set.  If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	/* A NULL field name selects positional (next-field) mode */
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2195 
2196 /**
2197  * synth_event_add_val - Add a named field's value to an open synth trace
2198  * @field_name: The name of the synthetic event field value to set
2199  * @val: The value to set the next field to
2200  * @trace_state: A pointer to object tracking the piecewise trace state
2201  *
2202  * Set the value of the named field in an event that's been opened by
2203  * synth_event_trace_start().
2204  *
2205  * The val param should be the value cast to u64.  If the value points
2206  * to a string, the val param should be a char * cast to u64.
2207  *
2208  * This function looks up the field name, and if found, sets the field
2209  * to the specified value.  This lookup makes this function more
2210  * expensive than synth_event_add_next_val(), so use that or the
2211  * none-piecewise synth_event_trace() instead if efficiency is more
2212  * important.
2213  *
2214  * Note however that synth_event_add_next_val() and
2215  * synth_event_add_val() can't be intermixed for a given event trace -
2216  * one or the other but not both can be used at the same time.
2217  *
2218  * Note also that synth_event_trace_end() must be called after all
2219  * values have been added for each event trace, regardless of whether
2220  * adding all field values succeeded or not.
2221  *
2222  * Return: 0 on success, err otherwise.
2223  */
2224 int synth_event_add_val(const char *field_name, u64 val,
2225 			struct synth_event_trace_state *trace_state)
2226 {
2227 	return __synth_event_add_val(field_name, val, trace_state);
2228 }
2229 EXPORT_SYMBOL_GPL(synth_event_add_val);
2230 
2231 /**
2232  * synth_event_trace_end - End piecewise synthetic event trace
2233  * @trace_state: A pointer to object tracking the piecewise trace state
2234  *
2235  * End the trace of a synthetic event opened by
2236  * synth_event_trace__start().
2237  *
2238  * This function 'closes' an event trace, which basically means that
2239  * it commits the reserved event and cleans up other loose ends.
2240  *
2241  * A pointer to a trace_state object is passed in, which will keep
2242  * track of the current event trace state opened with
2243  * synth_event_trace_start().
2244  *
2245  * Note that this function must be called after all values have been
2246  * added for each event trace, regardless of whether adding all field
2247  * values succeeded or not.
2248  *
2249  * Return: 0 on success, err otherwise.
2250  */
2251 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2252 {
2253 	if (!trace_state)
2254 		return -EINVAL;
2255 
2256 	__synth_event_trace_end(trace_state);
2257 
2258 	return 0;
2259 }
2260 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2261 
2262 static int create_synth_event(int argc, const char **argv)
2263 {
2264 	const char *name = argv[0];
2265 	int len;
2266 
2267 	if (name[0] != 's' || name[1] != ':')
2268 		return -ECANCELED;
2269 	name += 2;
2270 
2271 	/* This interface accepts group name prefix */
2272 	if (strchr(name, '/')) {
2273 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2274 		if (len == 0)
2275 			return -EINVAL;
2276 		name += len;
2277 	}
2278 	return __create_synth_event(argc - 1, name, argv + 1);
2279 }
2280 
2281 static int synth_event_release(struct dyn_event *ev)
2282 {
2283 	struct synth_event *event = to_synth_event(ev);
2284 	int ret;
2285 
2286 	if (event->ref)
2287 		return -EBUSY;
2288 
2289 	ret = unregister_synth_event(event);
2290 	if (ret)
2291 		return ret;
2292 
2293 	dyn_event_remove(ev);
2294 	free_synth_event(event);
2295 	return 0;
2296 }
2297 
2298 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2299 {
2300 	struct synth_field *field;
2301 	unsigned int i;
2302 
2303 	seq_printf(m, "%s\t", event->name);
2304 
2305 	for (i = 0; i < event->n_fields; i++) {
2306 		field = event->fields[i];
2307 
2308 		/* parameter values */
2309 		seq_printf(m, "%s %s%s", field->type, field->name,
2310 			   i == event->n_fields - 1 ? "" : "; ");
2311 	}
2312 
2313 	seq_putc(m, '\n');
2314 
2315 	return 0;
2316 }
2317 
2318 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2319 {
2320 	struct synth_event *event = to_synth_event(ev);
2321 
2322 	seq_printf(m, "s:%s/", event->class.system);
2323 
2324 	return __synth_event_show(m, event);
2325 }
2326 
/* seq_file ->show: display synthetic events, skip other dyn_event types */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (is_synth_event(ev))
		return __synth_event_show(m, to_synth_event(ev));

	return 0;
}
2336 
/* seq_file iteration over the global dyn_event list, showing synth events */
static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};
2343 
2344 static int synth_events_open(struct inode *inode, struct file *file)
2345 {
2346 	int ret;
2347 
2348 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2349 	if (ret)
2350 		return ret;
2351 
2352 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2353 		ret = dyn_events_release_all(&synth_event_ops);
2354 		if (ret < 0)
2355 			return ret;
2356 	}
2357 
2358 	return seq_open(file, &synth_events_seq_op);
2359 }
2360 
/*
 * Write handler for the synth_events file: each written line is parsed
 * as a create ("name field...") or delete ("!name") command.
 */
static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}
2368 
/* File operations for the tracefs synth_events control file */
static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
2376 
2377 static u64 hist_field_timestamp(struct hist_field *hist_field,
2378 				struct tracing_map_elt *elt,
2379 				struct ring_buffer_event *rbe,
2380 				void *event)
2381 {
2382 	struct hist_trigger_data *hist_data = hist_field->hist_data;
2383 	struct trace_array *tr = hist_data->event_file->tr;
2384 
2385 	u64 ts = ring_buffer_event_time_stamp(rbe);
2386 
2387 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
2388 		ts = ns2usecs(ts);
2389 
2390 	return ts;
2391 }
2392 
2393 static u64 hist_field_cpu(struct hist_field *hist_field,
2394 			  struct tracing_map_elt *elt,
2395 			  struct ring_buffer_event *rbe,
2396 			  void *event)
2397 {
2398 	int cpu = smp_processor_id();
2399 
2400 	return cpu;
2401 }
2402 
2403 /**
2404  * check_field_for_var_ref - Check if a VAR_REF field references a variable
2405  * @hist_field: The VAR_REF field to check
2406  * @var_data: The hist trigger that owns the variable
2407  * @var_idx: The trigger variable identifier
2408  *
2409  * Check the given VAR_REF field to see whether or not it references
2410  * the given variable associated with the given trigger.
2411  *
2412  * Return: The VAR_REF field if it does reference the variable, NULL if not
2413  */
2414 static struct hist_field *
2415 check_field_for_var_ref(struct hist_field *hist_field,
2416 			struct hist_trigger_data *var_data,
2417 			unsigned int var_idx)
2418 {
2419 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
2420 
2421 	if (hist_field && hist_field->var.idx == var_idx &&
2422 	    hist_field->var.hist_data == var_data)
2423 		return hist_field;
2424 
2425 	return NULL;
2426 }
2427 
2428 /**
2429  * find_var_ref - Check if a trigger has a reference to a trigger variable
2430  * @hist_data: The hist trigger that might have a reference to the variable
2431  * @var_data: The hist trigger that owns the variable
2432  * @var_idx: The trigger variable identifier
2433  *
2434  * Check the list of var_refs[] on the first hist trigger to see
2435  * whether any of them are references to the variable on the second
2436  * trigger.
2437  *
2438  * Return: The VAR_REF field referencing the variable if so, NULL if not
2439  */
2440 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
2441 				       struct hist_trigger_data *var_data,
2442 				       unsigned int var_idx)
2443 {
2444 	struct hist_field *hist_field;
2445 	unsigned int i;
2446 
2447 	for (i = 0; i < hist_data->n_var_refs; i++) {
2448 		hist_field = hist_data->var_refs[i];
2449 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
2450 			return hist_field;
2451 	}
2452 
2453 	return NULL;
2454 }
2455 
2456 /**
2457  * find_any_var_ref - Check if there is a reference to a given trigger variable
2458  * @hist_data: The hist trigger
2459  * @var_idx: The trigger variable identifier
2460  *
2461  * Check to see whether the given variable is currently referenced by
2462  * any other trigger.
2463  *
2464  * The trigger the variable is defined on is explicitly excluded - the
2465  * assumption being that a self-reference doesn't prevent a trigger
2466  * from being removed.
2467  *
2468  * Return: The VAR_REF field referencing the variable if so, NULL if not
2469  */
2470 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
2471 					   unsigned int var_idx)
2472 {
2473 	struct trace_array *tr = hist_data->event_file->tr;
2474 	struct hist_field *found = NULL;
2475 	struct hist_var_data *var_data;
2476 
2477 	list_for_each_entry(var_data, &tr->hist_vars, list) {
2478 		if (var_data->hist_data == hist_data)
2479 			continue;
2480 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
2481 		if (found)
2482 			break;
2483 	}
2484 
2485 	return found;
2486 }
2487 
2488 /**
2489  * check_var_refs - Check if there is a reference to any of trigger's variables
2490  * @hist_data: The hist trigger
2491  *
2492  * A trigger can define one or more variables.  If any one of them is
2493  * currently referenced by any other trigger, this function will
2494  * determine that.
2495 
2496  * Typically used to determine whether or not a trigger can be removed
2497  * - if there are any references to a trigger's variables, it cannot.
2498  *
2499  * Return: True if there is a reference to any of trigger's variables
2500  */
2501 static bool check_var_refs(struct hist_trigger_data *hist_data)
2502 {
2503 	struct hist_field *field;
2504 	bool found = false;
2505 	int i;
2506 
2507 	for_each_hist_field(i, hist_data) {
2508 		field = hist_data->fields[i];
2509 		if (field && field->flags & HIST_FIELD_FL_VAR) {
2510 			if (find_any_var_ref(hist_data, field->var.idx)) {
2511 				found = true;
2512 				break;
2513 			}
2514 		}
2515 	}
2516 
2517 	return found;
2518 }
2519 
2520 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
2521 {
2522 	struct trace_array *tr = hist_data->event_file->tr;
2523 	struct hist_var_data *var_data, *found = NULL;
2524 
2525 	list_for_each_entry(var_data, &tr->hist_vars, list) {
2526 		if (var_data->hist_data == hist_data) {
2527 			found = var_data;
2528 			break;
2529 		}
2530 	}
2531 
2532 	return found;
2533 }
2534 
2535 static bool field_has_hist_vars(struct hist_field *hist_field,
2536 				unsigned int level)
2537 {
2538 	int i;
2539 
2540 	if (level > 3)
2541 		return false;
2542 
2543 	if (!hist_field)
2544 		return false;
2545 
2546 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
2547 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
2548 		return true;
2549 
2550 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
2551 		struct hist_field *operand;
2552 
2553 		operand = hist_field->operands[i];
2554 		if (field_has_hist_vars(operand, level + 1))
2555 			return true;
2556 	}
2557 
2558 	return false;
2559 }
2560 
2561 static bool has_hist_vars(struct hist_trigger_data *hist_data)
2562 {
2563 	struct hist_field *hist_field;
2564 	int i;
2565 
2566 	for_each_hist_field(i, hist_data) {
2567 		hist_field = hist_data->fields[i];
2568 		if (field_has_hist_vars(hist_field, 0))
2569 			return true;
2570 	}
2571 
2572 	return false;
2573 }
2574 
2575 static int save_hist_vars(struct hist_trigger_data *hist_data)
2576 {
2577 	struct trace_array *tr = hist_data->event_file->tr;
2578 	struct hist_var_data *var_data;
2579 
2580 	var_data = find_hist_vars(hist_data);
2581 	if (var_data)
2582 		return 0;
2583 
2584 	if (tracing_check_open_get_tr(tr))
2585 		return -ENODEV;
2586 
2587 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
2588 	if (!var_data) {
2589 		trace_array_put(tr);
2590 		return -ENOMEM;
2591 	}
2592 
2593 	var_data->hist_data = hist_data;
2594 	list_add(&var_data->list, &tr->hist_vars);
2595 
2596 	return 0;
2597 }
2598 
2599 static void remove_hist_vars(struct hist_trigger_data *hist_data)
2600 {
2601 	struct trace_array *tr = hist_data->event_file->tr;
2602 	struct hist_var_data *var_data;
2603 
2604 	var_data = find_hist_vars(hist_data);
2605 	if (!var_data)
2606 		return;
2607 
2608 	if (WARN_ON(check_var_refs(hist_data)))
2609 		return;
2610 
2611 	list_del(&var_data->list);
2612 
2613 	kfree(var_data);
2614 
2615 	trace_array_put(tr);
2616 }
2617 
2618 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
2619 					 const char *var_name)
2620 {
2621 	struct hist_field *hist_field, *found = NULL;
2622 	int i;
2623 
2624 	for_each_hist_field(i, hist_data) {
2625 		hist_field = hist_data->fields[i];
2626 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
2627 		    strcmp(hist_field->var.name, var_name) == 0) {
2628 			found = hist_field;
2629 			break;
2630 		}
2631 	}
2632 
2633 	return found;
2634 }
2635 
2636 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
2637 				   struct trace_event_file *file,
2638 				   const char *var_name)
2639 {
2640 	struct hist_trigger_data *test_data;
2641 	struct event_trigger_data *test;
2642 	struct hist_field *hist_field;
2643 
2644 	lockdep_assert_held(&event_mutex);
2645 
2646 	hist_field = find_var_field(hist_data, var_name);
2647 	if (hist_field)
2648 		return hist_field;
2649 
2650 	list_for_each_entry(test, &file->triggers, list) {
2651 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2652 			test_data = test->private_data;
2653 			hist_field = find_var_field(test_data, var_name);
2654 			if (hist_field)
2655 				return hist_field;
2656 		}
2657 	}
2658 
2659 	return NULL;
2660 }
2661 
2662 static struct trace_event_file *find_var_file(struct trace_array *tr,
2663 					      char *system,
2664 					      char *event_name,
2665 					      char *var_name)
2666 {
2667 	struct hist_trigger_data *var_hist_data;
2668 	struct hist_var_data *var_data;
2669 	struct trace_event_file *file, *found = NULL;
2670 
2671 	if (system)
2672 		return find_event_file(tr, system, event_name);
2673 
2674 	list_for_each_entry(var_data, &tr->hist_vars, list) {
2675 		var_hist_data = var_data->hist_data;
2676 		file = var_hist_data->event_file;
2677 		if (file == found)
2678 			continue;
2679 
2680 		if (find_var_field(var_hist_data, var_name)) {
2681 			if (found) {
2682 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
2683 				return NULL;
2684 			}
2685 
2686 			found = file;
2687 		}
2688 	}
2689 
2690 	return found;
2691 }
2692 
2693 static struct hist_field *find_file_var(struct trace_event_file *file,
2694 					const char *var_name)
2695 {
2696 	struct hist_trigger_data *test_data;
2697 	struct event_trigger_data *test;
2698 	struct hist_field *hist_field;
2699 
2700 	lockdep_assert_held(&event_mutex);
2701 
2702 	list_for_each_entry(test, &file->triggers, list) {
2703 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2704 			test_data = test->private_data;
2705 			hist_field = find_var_field(test_data, var_name);
2706 			if (hist_field)
2707 				return hist_field;
2708 		}
2709 	}
2710 
2711 	return NULL;
2712 }
2713 
/*
 * Look for variable @var_name in the events named by this trigger's
 * onmatch() actions.  The name must resolve to exactly one variable
 * across all of them; an ambiguous name returns ERR_PTR(-EINVAL).
 */
static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;
			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				/* a second match means the name is ambiguous */
				if (found) {
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}
	return found;
}
2746 
2747 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
2748 					 char *system,
2749 					 char *event_name,
2750 					 char *var_name)
2751 {
2752 	struct trace_array *tr = hist_data->event_file->tr;
2753 	struct hist_field *hist_field = NULL;
2754 	struct trace_event_file *file;
2755 
2756 	if (!system || !event_name) {
2757 		hist_field = find_match_var(hist_data, var_name);
2758 		if (IS_ERR(hist_field))
2759 			return NULL;
2760 		if (hist_field)
2761 			return hist_field;
2762 	}
2763 
2764 	file = find_var_file(tr, system, event_name, var_name);
2765 	if (!file)
2766 		return NULL;
2767 
2768 	hist_field = find_file_var(file, var_name);
2769 
2770 	return hist_field;
2771 }
2772 
2773 static u64 hist_field_var_ref(struct hist_field *hist_field,
2774 			      struct tracing_map_elt *elt,
2775 			      struct ring_buffer_event *rbe,
2776 			      void *event)
2777 {
2778 	struct hist_elt_data *elt_data;
2779 	u64 var_val = 0;
2780 
2781 	if (WARN_ON_ONCE(!elt))
2782 		return var_val;
2783 
2784 	elt_data = elt->private_data;
2785 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
2786 
2787 	return var_val;
2788 }
2789 
/*
 * Fill var_ref_vals[] with the current value of every variable this
 * trigger references, by looking up @key in each owning trigger's map.
 * @self selects which references are resolved: only this trigger's own
 * variables (true) or only other triggers' variables (false).
 *
 * Returns true only if every selected reference could be resolved.
 */
static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
			     u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		/* reference not (yet) bound to an owning trigger */
		if (var_data == NULL) {
			resolved = false;
			break;
		}

		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		/* the variable must have been set for this key */
		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		/* read_once references are consumed on read */
		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}
2835 
2836 static const char *hist_field_name(struct hist_field *field,
2837 				   unsigned int level)
2838 {
2839 	const char *field_name = "";
2840 
2841 	if (level > 1)
2842 		return field_name;
2843 
2844 	if (field->field)
2845 		field_name = field->field->name;
2846 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
2847 		 field->flags & HIST_FIELD_FL_ALIAS)
2848 		field_name = hist_field_name(field->operands[0], ++level);
2849 	else if (field->flags & HIST_FIELD_FL_CPU)
2850 		field_name = "cpu";
2851 	else if (field->flags & HIST_FIELD_FL_EXPR ||
2852 		 field->flags & HIST_FIELD_FL_VAR_REF) {
2853 		if (field->system) {
2854 			static char full_name[MAX_FILTER_STR_VAL];
2855 
2856 			strcat(full_name, field->system);
2857 			strcat(full_name, ".");
2858 			strcat(full_name, field->event_name);
2859 			strcat(full_name, ".");
2860 			strcat(full_name, field->name);
2861 			field_name = full_name;
2862 		} else
2863 			field_name = field->name;
2864 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
2865 		field_name = "common_timestamp";
2866 
2867 	if (field_name == NULL)
2868 		field_name = "";
2869 
2870 	return field_name;
2871 }
2872 
2873 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
2874 {
2875 	hist_field_fn_t fn = NULL;
2876 
2877 	switch (field_size) {
2878 	case 8:
2879 		if (field_is_signed)
2880 			fn = hist_field_s64;
2881 		else
2882 			fn = hist_field_u64;
2883 		break;
2884 	case 4:
2885 		if (field_is_signed)
2886 			fn = hist_field_s32;
2887 		else
2888 			fn = hist_field_u32;
2889 		break;
2890 	case 2:
2891 		if (field_is_signed)
2892 			fn = hist_field_s16;
2893 		else
2894 			fn = hist_field_u16;
2895 		break;
2896 	case 1:
2897 		if (field_is_signed)
2898 			fn = hist_field_s8;
2899 		else
2900 			fn = hist_field_u8;
2901 		break;
2902 	}
2903 
2904 	return fn;
2905 }
2906 
2907 static int parse_map_size(char *str)
2908 {
2909 	unsigned long size, map_bits;
2910 	int ret;
2911 
2912 	ret = kstrtoul(str, 0, &size);
2913 	if (ret)
2914 		goto out;
2915 
2916 	map_bits = ilog2(roundup_pow_of_two(size));
2917 	if (map_bits < TRACING_MAP_BITS_MIN ||
2918 	    map_bits > TRACING_MAP_BITS_MAX)
2919 		ret = -EINVAL;
2920 	else
2921 		ret = map_bits;
2922  out:
2923 	return ret;
2924 }
2925 
/* Free a hist_trigger_attrs and every string it owns (NULL attrs is ok) */
static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}
2946 
2947 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2948 {
2949 	int ret = -EINVAL;
2950 
2951 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
2952 		return ret;
2953 
2954 	if ((str_has_prefix(str, "onmatch(")) ||
2955 	    (str_has_prefix(str, "onmax(")) ||
2956 	    (str_has_prefix(str, "onchange("))) {
2957 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2958 		if (!attrs->action_str[attrs->n_actions]) {
2959 			ret = -ENOMEM;
2960 			return ret;
2961 		}
2962 		attrs->n_actions++;
2963 		ret = 0;
2964 	}
2965 	return ret;
2966 }
2967 
/*
 * Parse one ':'-separated trigger clause of the form "lhs=rhs".  Known
 * keywords fill the corresponding attrs member; anything else is kept
 * verbatim as a variable assignment for later parsing.
 */
static int parse_assignment(struct trace_array *tr,
			    char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		/* note: keeps the whole "name=..." string, prefix included */
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;

		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		/* not a keyword - treat it as a variable assignment */
		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
3037 
/*
 * Split a hist trigger command string on ':' and parse each clause into
 * a newly allocated hist_trigger_attrs.  A key specification is
 * mandatory; the clock defaults to "global" if none was given.
 *
 * Returns the attrs on success, an ERR_PTR otherwise (the partially
 * filled attrs is freed on error).
 */
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			/* reject "lhs=" with nothing after the '=' */
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			/* anything else must be an action clause */
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	/* a hist trigger is meaningless without at least one key */
	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}
3095 
3096 static inline void save_comm(char *comm, struct task_struct *task)
3097 {
3098 	if (!task->pid) {
3099 		strcpy(comm, "<idle>");
3100 		return;
3101 	}
3102 
3103 	if (WARN_ON_ONCE(task->pid < 0)) {
3104 		strcpy(comm, "<XXX>");
3105 		return;
3106 	}
3107 
3108 	strncpy(comm, task->comm, TASK_COMM_LEN);
3109 }
3110 
/* Free a per-element private data block and all its string buffers */
static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	/* unused slots are NULL; kfree(NULL) is a no-op */
	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->comm);
	kfree(elt_data);
}
3121 
3122 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
3123 {
3124 	struct hist_elt_data *elt_data = elt->private_data;
3125 
3126 	hist_elt_data_free(elt_data);
3127 }
3128 
3129 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
3130 {
3131 	struct hist_trigger_data *hist_data = elt->map->private_data;
3132 	unsigned int size = TASK_COMM_LEN;
3133 	struct hist_elt_data *elt_data;
3134 	struct hist_field *key_field;
3135 	unsigned int i, n_str;
3136 
3137 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
3138 	if (!elt_data)
3139 		return -ENOMEM;
3140 
3141 	for_each_hist_key_field(i, hist_data) {
3142 		key_field = hist_data->fields[i];
3143 
3144 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
3145 			elt_data->comm = kzalloc(size, GFP_KERNEL);
3146 			if (!elt_data->comm) {
3147 				kfree(elt_data);
3148 				return -ENOMEM;
3149 			}
3150 			break;
3151 		}
3152 	}
3153 
3154 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
3155 
3156 	size = STR_VAR_LEN_MAX;
3157 
3158 	for (i = 0; i < n_str; i++) {
3159 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
3160 		if (!elt_data->field_var_str[i]) {
3161 			hist_elt_data_free(elt_data);
3162 			return -ENOMEM;
3163 		}
3164 	}
3165 
3166 	elt->private_data = elt_data;
3167 
3168 	return 0;
3169 }
3170 
/*
 * tracing_map ->elt_init callback: capture the current task's comm at
 * element-claim time, for keys using the .execname modifier.
 */
static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	/* comm is only allocated when some key uses .execname */
	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}
3178 
/* Element lifetime callbacks installed on every hist trigger's map */
static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};
3184 
3185 static const char *get_hist_field_flags(struct hist_field *hist_field)
3186 {
3187 	const char *flags_str = NULL;
3188 
3189 	if (hist_field->flags & HIST_FIELD_FL_HEX)
3190 		flags_str = "hex";
3191 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
3192 		flags_str = "sym";
3193 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
3194 		flags_str = "sym-offset";
3195 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
3196 		flags_str = "execname";
3197 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
3198 		flags_str = "syscall";
3199 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
3200 		flags_str = "log2";
3201 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
3202 		flags_str = "usecs";
3203 
3204 	return flags_str;
3205 }
3206 
3207 static void expr_field_str(struct hist_field *field, char *expr)
3208 {
3209 	if (field->flags & HIST_FIELD_FL_VAR_REF)
3210 		strcat(expr, "$");
3211 
3212 	strcat(expr, hist_field_name(field, 0));
3213 
3214 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
3215 		const char *flags_str = get_hist_field_flags(field);
3216 
3217 		if (flags_str) {
3218 			strcat(expr, ".");
3219 			strcat(expr, flags_str);
3220 		}
3221 	}
3222 }
3223 
/*
 * Render an expression field back into source form ("a+b", "-(x)",
 * "a.hex-b", ...), recursing at most one level for unary minus.
 * Returns a kmalloc'd string the caller must free, or NULL on
 * allocation failure / excessive nesting / unknown operator.
 */
static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	/* no operands means this is a plain (possibly modified) field */
	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}
3275 
3276 static int contains_operator(char *str)
3277 {
3278 	enum field_op_id field_op = FIELD_OP_NONE;
3279 	char *op;
3280 
3281 	op = strpbrk(str, "+-");
3282 	if (!op)
3283 		return FIELD_OP_NONE;
3284 
3285 	switch (*op) {
3286 	case '-':
3287 		if (*str == '-')
3288 			field_op = FIELD_OP_UNARY_MINUS;
3289 		else
3290 			field_op = FIELD_OP_MINUS;
3291 		break;
3292 	case '+':
3293 		field_op = FIELD_OP_PLUS;
3294 		break;
3295 	default:
3296 		break;
3297 	}
3298 
3299 	return field_op;
3300 }
3301 
3302 static void __destroy_hist_field(struct hist_field *hist_field)
3303 {
3304 	kfree(hist_field->var.name);
3305 	kfree(hist_field->name);
3306 	kfree(hist_field->type);
3307 
3308 	kfree(hist_field);
3309 }
3310 
3311 static void destroy_hist_field(struct hist_field *hist_field,
3312 			       unsigned int level)
3313 {
3314 	unsigned int i;
3315 
3316 	if (level > 3)
3317 		return;
3318 
3319 	if (!hist_field)
3320 		return;
3321 
3322 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
3323 		return; /* var refs will be destroyed separately */
3324 
3325 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
3326 		destroy_hist_field(hist_field->operands[i], level + 1);
3327 
3328 	__destroy_hist_field(hist_field);
3329 }
3330 
3331 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
3332 					    struct ftrace_event_field *field,
3333 					    unsigned long flags,
3334 					    char *var_name)
3335 {
3336 	struct hist_field *hist_field;
3337 
3338 	if (field && is_function_field(field))
3339 		return NULL;
3340 
3341 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3342 	if (!hist_field)
3343 		return NULL;
3344 
3345 	hist_field->hist_data = hist_data;
3346 
3347 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
3348 		goto out; /* caller will populate */
3349 
3350 	if (flags & HIST_FIELD_FL_VAR_REF) {
3351 		hist_field->fn = hist_field_var_ref;
3352 		goto out;
3353 	}
3354 
3355 	if (flags & HIST_FIELD_FL_HITCOUNT) {
3356 		hist_field->fn = hist_field_counter;
3357 		hist_field->size = sizeof(u64);
3358 		hist_field->type = kstrdup("u64", GFP_KERNEL);
3359 		if (!hist_field->type)
3360 			goto free;
3361 		goto out;
3362 	}
3363 
3364 	if (flags & HIST_FIELD_FL_STACKTRACE) {
3365 		hist_field->fn = hist_field_none;
3366 		goto out;
3367 	}
3368 
3369 	if (flags & HIST_FIELD_FL_LOG2) {
3370 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
3371 		hist_field->fn = hist_field_log2;
3372 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
3373 		hist_field->size = hist_field->operands[0]->size;
3374 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
3375 		if (!hist_field->type)
3376 			goto free;
3377 		goto out;
3378 	}
3379 
3380 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
3381 		hist_field->fn = hist_field_timestamp;
3382 		hist_field->size = sizeof(u64);
3383 		hist_field->type = kstrdup("u64", GFP_KERNEL);
3384 		if (!hist_field->type)
3385 			goto free;
3386 		goto out;
3387 	}
3388 
3389 	if (flags & HIST_FIELD_FL_CPU) {
3390 		hist_field->fn = hist_field_cpu;
3391 		hist_field->size = sizeof(int);
3392 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
3393 		if (!hist_field->type)
3394 			goto free;
3395 		goto out;
3396 	}
3397 
3398 	if (WARN_ON_ONCE(!field))
3399 		goto out;
3400 
3401 	if (is_string_field(field)) {
3402 		flags |= HIST_FIELD_FL_STRING;
3403 
3404 		hist_field->size = MAX_FILTER_STR_VAL;
3405 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
3406 		if (!hist_field->type)
3407 			goto free;
3408 
3409 		if (field->filter_type == FILTER_STATIC_STRING)
3410 			hist_field->fn = hist_field_string;
3411 		else if (field->filter_type == FILTER_DYN_STRING)
3412 			hist_field->fn = hist_field_dynstring;
3413 		else
3414 			hist_field->fn = hist_field_pstring;
3415 	} else {
3416 		hist_field->size = field->size;
3417 		hist_field->is_signed = field->is_signed;
3418 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
3419 		if (!hist_field->type)
3420 			goto free;
3421 
3422 		hist_field->fn = select_value_fn(field->size,
3423 						 field->is_signed);
3424 		if (!hist_field->fn) {
3425 			destroy_hist_field(hist_field, 0);
3426 			return NULL;
3427 		}
3428 	}
3429  out:
3430 	hist_field->field = field;
3431 	hist_field->flags = flags;
3432 
3433 	if (var_name) {
3434 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
3435 		if (!hist_field->var.name)
3436 			goto free;
3437 	}
3438 
3439 	return hist_field;
3440  free:
3441 	destroy_hist_field(hist_field, 0);
3442 	return NULL;
3443 }
3444 
3445 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
3446 {
3447 	unsigned int i;
3448 
3449 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
3450 		if (hist_data->fields[i]) {
3451 			destroy_hist_field(hist_data->fields[i], 0);
3452 			hist_data->fields[i] = NULL;
3453 		}
3454 	}
3455 
3456 	for (i = 0; i < hist_data->n_var_refs; i++) {
3457 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
3458 		__destroy_hist_field(hist_data->var_refs[i]);
3459 		hist_data->var_refs[i] = NULL;
3460 	}
3461 }
3462 
3463 static int init_var_ref(struct hist_field *ref_field,
3464 			struct hist_field *var_field,
3465 			char *system, char *event_name)
3466 {
3467 	int err = 0;
3468 
3469 	ref_field->var.idx = var_field->var.idx;
3470 	ref_field->var.hist_data = var_field->hist_data;
3471 	ref_field->size = var_field->size;
3472 	ref_field->is_signed = var_field->is_signed;
3473 	ref_field->flags |= var_field->flags &
3474 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3475 
3476 	if (system) {
3477 		ref_field->system = kstrdup(system, GFP_KERNEL);
3478 		if (!ref_field->system)
3479 			return -ENOMEM;
3480 	}
3481 
3482 	if (event_name) {
3483 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
3484 		if (!ref_field->event_name) {
3485 			err = -ENOMEM;
3486 			goto free;
3487 		}
3488 	}
3489 
3490 	if (var_field->var.name) {
3491 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
3492 		if (!ref_field->name) {
3493 			err = -ENOMEM;
3494 			goto free;
3495 		}
3496 	} else if (var_field->name) {
3497 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
3498 		if (!ref_field->name) {
3499 			err = -ENOMEM;
3500 			goto free;
3501 		}
3502 	}
3503 
3504 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
3505 	if (!ref_field->type) {
3506 		err = -ENOMEM;
3507 		goto free;
3508 	}
3509  out:
3510 	return err;
3511  free:
3512 	kfree(ref_field->system);
3513 	kfree(ref_field->event_name);
3514 	kfree(ref_field->name);
3515 
3516 	goto out;
3517 }
3518 
3519 static int find_var_ref_idx(struct hist_trigger_data *hist_data,
3520 			    struct hist_field *var_field)
3521 {
3522 	struct hist_field *ref_field;
3523 	int i;
3524 
3525 	for (i = 0; i < hist_data->n_var_refs; i++) {
3526 		ref_field = hist_data->var_refs[i];
3527 		if (ref_field->var.idx == var_field->var.idx &&
3528 		    ref_field->var.hist_data == var_field->hist_data)
3529 			return i;
3530 	}
3531 
3532 	return -ENOENT;
3533 }
3534 
3535 /**
3536  * create_var_ref - Create a variable reference and attach it to trigger
3537  * @hist_data: The trigger that will be referencing the variable
3538  * @var_field: The VAR field to create a reference to
3539  * @system: The optional system string
3540  * @event_name: The optional event_name string
3541  *
3542  * Given a variable hist_field, create a VAR_REF hist_field that
3543  * represents a reference to it.
3544  *
3545  * This function also adds the reference to the trigger that
3546  * now references the variable.
3547  *
3548  * Return: The VAR_REF field if successful, NULL if not
3549  */
3550 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
3551 					 struct hist_field *var_field,
3552 					 char *system, char *event_name)
3553 {
3554 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
3555 	struct hist_field *ref_field;
3556 
3557 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
3558 	if (ref_field) {
3559 		if (init_var_ref(ref_field, var_field, system, event_name)) {
3560 			destroy_hist_field(ref_field, 0);
3561 			return NULL;
3562 		}
3563 
3564 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
3565 		ref_field->var_ref_idx = hist_data->n_var_refs++;
3566 	}
3567 
3568 	return ref_field;
3569 }
3570 
/*
 * A variable reference is a '$' followed by at least one character.
 */
static bool is_var_ref(char *var_name)
{
	return var_name && var_name[0] == '$' && var_name[1] != '\0';
}
3578 
3579 static char *field_name_from_var(struct hist_trigger_data *hist_data,
3580 				 char *var_name)
3581 {
3582 	char *name, *field;
3583 	unsigned int i;
3584 
3585 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
3586 		name = hist_data->attrs->var_defs.name[i];
3587 
3588 		if (strcmp(var_name, name) == 0) {
3589 			field = hist_data->attrs->var_defs.expr[i];
3590 			if (contains_operator(field) || is_var_ref(field))
3591 				continue;
3592 			return field;
3593 		}
3594 	}
3595 
3596 	return NULL;
3597 }
3598 
3599 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
3600 				 char *system, char *event_name,
3601 				 char *var_name)
3602 {
3603 	struct trace_event_call *call;
3604 
3605 	if (system && event_name) {
3606 		call = hist_data->event_file->event_call;
3607 
3608 		if (strcmp(system, call->class->system) != 0)
3609 			return NULL;
3610 
3611 		if (strcmp(event_name, trace_event_name(call)) != 0)
3612 			return NULL;
3613 	}
3614 
3615 	if (!!system != !!event_name)
3616 		return NULL;
3617 
3618 	if (!is_var_ref(var_name))
3619 		return NULL;
3620 
3621 	var_name++;
3622 
3623 	return field_name_from_var(hist_data, var_name);
3624 }
3625 
3626 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
3627 					char *system, char *event_name,
3628 					char *var_name)
3629 {
3630 	struct hist_field *var_field = NULL, *ref_field = NULL;
3631 	struct trace_array *tr = hist_data->event_file->tr;
3632 
3633 	if (!is_var_ref(var_name))
3634 		return NULL;
3635 
3636 	var_name++;
3637 
3638 	var_field = find_event_var(hist_data, system, event_name, var_name);
3639 	if (var_field)
3640 		ref_field = create_var_ref(hist_data, var_field,
3641 					   system, event_name);
3642 
3643 	if (!ref_field)
3644 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
3645 
3646 	return ref_field;
3647 }
3648 
3649 static struct ftrace_event_field *
3650 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
3651 	    char *field_str, unsigned long *flags)
3652 {
3653 	struct ftrace_event_field *field = NULL;
3654 	char *field_name, *modifier, *str;
3655 	struct trace_array *tr = file->tr;
3656 
3657 	modifier = str = kstrdup(field_str, GFP_KERNEL);
3658 	if (!modifier)
3659 		return ERR_PTR(-ENOMEM);
3660 
3661 	field_name = strsep(&modifier, ".");
3662 	if (modifier) {
3663 		if (strcmp(modifier, "hex") == 0)
3664 			*flags |= HIST_FIELD_FL_HEX;
3665 		else if (strcmp(modifier, "sym") == 0)
3666 			*flags |= HIST_FIELD_FL_SYM;
3667 		else if (strcmp(modifier, "sym-offset") == 0)
3668 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
3669 		else if ((strcmp(modifier, "execname") == 0) &&
3670 			 (strcmp(field_name, "common_pid") == 0))
3671 			*flags |= HIST_FIELD_FL_EXECNAME;
3672 		else if (strcmp(modifier, "syscall") == 0)
3673 			*flags |= HIST_FIELD_FL_SYSCALL;
3674 		else if (strcmp(modifier, "log2") == 0)
3675 			*flags |= HIST_FIELD_FL_LOG2;
3676 		else if (strcmp(modifier, "usecs") == 0)
3677 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
3678 		else {
3679 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
3680 			field = ERR_PTR(-EINVAL);
3681 			goto out;
3682 		}
3683 	}
3684 
3685 	if (strcmp(field_name, "common_timestamp") == 0) {
3686 		*flags |= HIST_FIELD_FL_TIMESTAMP;
3687 		hist_data->enable_timestamps = true;
3688 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
3689 			hist_data->attrs->ts_in_usecs = true;
3690 	} else if (strcmp(field_name, "cpu") == 0)
3691 		*flags |= HIST_FIELD_FL_CPU;
3692 	else {
3693 		field = trace_find_event_field(file->event_call, field_name);
3694 		if (!field || !field->size) {
3695 			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
3696 			field = ERR_PTR(-EINVAL);
3697 			goto out;
3698 		}
3699 	}
3700  out:
3701 	kfree(str);
3702 
3703 	return field;
3704 }
3705 
3706 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
3707 				       struct hist_field *var_ref,
3708 				       char *var_name)
3709 {
3710 	struct hist_field *alias = NULL;
3711 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
3712 
3713 	alias = create_hist_field(hist_data, NULL, flags, var_name);
3714 	if (!alias)
3715 		return NULL;
3716 
3717 	alias->fn = var_ref->fn;
3718 	alias->operands[0] = var_ref;
3719 
3720 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
3721 		destroy_hist_field(alias, 0);
3722 		return NULL;
3723 	}
3724 
3725 	alias->var_ref_idx = var_ref->var_ref_idx;
3726 
3727 	return alias;
3728 }
3729 
/*
 * Parse a non-compound expression element ("atom"): either a variable
 * reference ($var or system.event.$var), possibly aliased to a new
 * local variable, or a plain event field with optional modifier.
 *
 * Returns a hist_field on success or an ERR_PTR on failure.
 */
static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file, char *str,
				     unsigned long *flags, char *var_name)
{
	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
	struct ftrace_event_field *field = NULL;
	struct hist_field *hist_field = NULL;
	int ret = 0;

	/*
	 * Two dots mean a fully qualified reference: split @str in
	 * place into "system", "event" and the trailing var/field part.
	 */
	s = strchr(str, '.');
	if (s) {
		s = strchr(++s, '.');
		if (s) {
			ref_system = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_event = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_var = str;
		}
	}

	/*
	 * A ref to a same-trigger variable defined as a plain field
	 * name resolves directly to that field name; otherwise try a
	 * cross-event variable reference.
	 */
	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
	if (!s) {
		hist_field = parse_var_ref(hist_data, ref_system,
					   ref_event, ref_var);
		if (hist_field) {
			if (var_name) {
				/* "var=$ref" creates an alias for the ref */
				hist_field = create_alias(hist_data, hist_field, var_name);
				if (!hist_field) {
					ret = -ENOMEM;
					goto out;
				}
			}
			return hist_field;
		}
	} else
		str = s;	/* fall through to plain-field parsing */

	field = parse_field(hist_data, file, str, flags);
	if (IS_ERR(field)) {
		ret = PTR_ERR(field);
		goto out;
	}

	hist_field = create_hist_field(hist_data, field, *flags, var_name);
	if (!hist_field) {
		ret = -ENOMEM;
		goto out;
	}

	return hist_field;
 out:
	return ERR_PTR(ret);
}
3790 
3791 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3792 				     struct trace_event_file *file,
3793 				     char *str, unsigned long flags,
3794 				     char *var_name, unsigned int level);
3795 
/*
 * Parse a unary minus expression.  Only the explicitly parenthesized
 * form "-(subexpr)" is accepted.  @str is modified in place (the
 * closing paren is overwritten with a NUL).
 *
 * Returns the expression hist_field or an ERR_PTR on failure.
 */
static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
				      struct trace_event_file *file,
				      char *str, unsigned long flags,
				      char *var_name, unsigned int level)
{
	struct hist_field *operand1, *expr = NULL;
	unsigned long operand_flags;
	int ret = 0;
	char *s;

	/* we support only -(xxx) i.e. explicit parens required */

	if (level > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		ret = -EINVAL;
		goto free;
	}

	str++; /* skip leading '-' */

	s = strchr(str, '(');
	if (s)
		str++;	/* skip one char; assumes '(' directly follows '-' */
	else {
		ret = -EINVAL;
		goto free;
	}

	s = strrchr(str, ')');
	if (s)
		*s = '\0';	/* terminate the subexpression in place */
	else {
		ret = -EINVAL; /* no closing ')' */
		goto free;
	}

	flags |= HIST_FIELD_FL_EXPR;
	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	operand_flags = 0;
	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		goto free;
	}

	/* Propagate timestamp-ness from the operand to the expression */
	expr->flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
	expr->fn = hist_field_unary_minus;
	expr->operands[0] = operand1;
	expr->operator = FIELD_OP_UNARY_MINUS;
	expr->name = expr_str(expr, 0);	/* display name; may be NULL on alloc failure */
	expr->type = kstrdup(operand1->type, GFP_KERNEL);
	if (!expr->type) {
		ret = -ENOMEM;
		goto free;
	}

	return expr;
 free:
	/* expr owns operand1 once assigned; destroy recurses into it */
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}
3863 
3864 static int check_expr_operands(struct trace_array *tr,
3865 			       struct hist_field *operand1,
3866 			       struct hist_field *operand2)
3867 {
3868 	unsigned long operand1_flags = operand1->flags;
3869 	unsigned long operand2_flags = operand2->flags;
3870 
3871 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
3872 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
3873 		struct hist_field *var;
3874 
3875 		var = find_var_field(operand1->var.hist_data, operand1->name);
3876 		if (!var)
3877 			return -EINVAL;
3878 		operand1_flags = var->flags;
3879 	}
3880 
3881 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
3882 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
3883 		struct hist_field *var;
3884 
3885 		var = find_var_field(operand2->var.hist_data, operand2->name);
3886 		if (!var)
3887 			return -EINVAL;
3888 		operand2_flags = var->flags;
3889 	}
3890 
3891 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
3892 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
3893 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
3894 		return -EINVAL;
3895 	}
3896 
3897 	return 0;
3898 }
3899 
3900 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3901 				     struct trace_event_file *file,
3902 				     char *str, unsigned long flags,
3903 				     char *var_name, unsigned int level)
3904 {
3905 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
3906 	unsigned long operand_flags;
3907 	int field_op, ret = -EINVAL;
3908 	char *sep, *operand1_str;
3909 
3910 	if (level > 3) {
3911 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3912 		return ERR_PTR(-EINVAL);
3913 	}
3914 
3915 	field_op = contains_operator(str);
3916 
3917 	if (field_op == FIELD_OP_NONE)
3918 		return parse_atom(hist_data, file, str, &flags, var_name);
3919 
3920 	if (field_op == FIELD_OP_UNARY_MINUS)
3921 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
3922 
3923 	switch (field_op) {
3924 	case FIELD_OP_MINUS:
3925 		sep = "-";
3926 		break;
3927 	case FIELD_OP_PLUS:
3928 		sep = "+";
3929 		break;
3930 	default:
3931 		goto free;
3932 	}
3933 
3934 	operand1_str = strsep(&str, sep);
3935 	if (!operand1_str || !str)
3936 		goto free;
3937 
3938 	operand_flags = 0;
3939 	operand1 = parse_atom(hist_data, file, operand1_str,
3940 			      &operand_flags, NULL);
3941 	if (IS_ERR(operand1)) {
3942 		ret = PTR_ERR(operand1);
3943 		operand1 = NULL;
3944 		goto free;
3945 	}
3946 
3947 	/* rest of string could be another expression e.g. b+c in a+b+c */
3948 	operand_flags = 0;
3949 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3950 	if (IS_ERR(operand2)) {
3951 		ret = PTR_ERR(operand2);
3952 		operand2 = NULL;
3953 		goto free;
3954 	}
3955 
3956 	ret = check_expr_operands(file->tr, operand1, operand2);
3957 	if (ret)
3958 		goto free;
3959 
3960 	flags |= HIST_FIELD_FL_EXPR;
3961 
3962 	flags |= operand1->flags &
3963 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3964 
3965 	expr = create_hist_field(hist_data, NULL, flags, var_name);
3966 	if (!expr) {
3967 		ret = -ENOMEM;
3968 		goto free;
3969 	}
3970 
3971 	operand1->read_once = true;
3972 	operand2->read_once = true;
3973 
3974 	expr->operands[0] = operand1;
3975 	expr->operands[1] = operand2;
3976 	expr->operator = field_op;
3977 	expr->name = expr_str(expr, 0);
3978 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
3979 	if (!expr->type) {
3980 		ret = -ENOMEM;
3981 		goto free;
3982 	}
3983 
3984 	switch (field_op) {
3985 	case FIELD_OP_MINUS:
3986 		expr->fn = hist_field_minus;
3987 		break;
3988 	case FIELD_OP_PLUS:
3989 		expr->fn = hist_field_plus;
3990 		break;
3991 	default:
3992 		ret = -EINVAL;
3993 		goto free;
3994 	}
3995 
3996 	return expr;
3997  free:
3998 	destroy_hist_field(operand1, 0);
3999 	destroy_hist_field(operand2, 0);
4000 	destroy_hist_field(expr, 0);
4001 
4002 	return ERR_PTR(ret);
4003 }
4004 
4005 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
4006 				 struct trace_event_file *file)
4007 {
4008 	struct event_trigger_data *test;
4009 
4010 	lockdep_assert_held(&event_mutex);
4011 
4012 	list_for_each_entry(test, &file->triggers, list) {
4013 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4014 			if (test->private_data == hist_data)
4015 				return test->filter_str;
4016 		}
4017 	}
4018 
4019 	return NULL;
4020 }
4021 
4022 static struct event_command trigger_hist_cmd;
4023 static int event_hist_trigger_func(struct event_command *cmd_ops,
4024 				   struct trace_event_file *file,
4025 				   char *glob, char *cmd, char *param);
4026 
4027 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
4028 			    struct hist_trigger_data *hist_data,
4029 			    unsigned int n_keys)
4030 {
4031 	struct hist_field *target_hist_field, *hist_field;
4032 	unsigned int n, i, j;
4033 
4034 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
4035 		return false;
4036 
4037 	i = hist_data->n_vals;
4038 	j = target_hist_data->n_vals;
4039 
4040 	for (n = 0; n < n_keys; n++) {
4041 		hist_field = hist_data->fields[i + n];
4042 		target_hist_field = target_hist_data->fields[j + n];
4043 
4044 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
4045 			return false;
4046 		if (hist_field->size != target_hist_field->size)
4047 			return false;
4048 		if (hist_field->is_signed != target_hist_field->is_signed)
4049 			return false;
4050 	}
4051 
4052 	return true;
4053 }
4054 
4055 static struct hist_trigger_data *
4056 find_compatible_hist(struct hist_trigger_data *target_hist_data,
4057 		     struct trace_event_file *file)
4058 {
4059 	struct hist_trigger_data *hist_data;
4060 	struct event_trigger_data *test;
4061 	unsigned int n_keys;
4062 
4063 	lockdep_assert_held(&event_mutex);
4064 
4065 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
4066 
4067 	list_for_each_entry(test, &file->triggers, list) {
4068 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4069 			hist_data = test->private_data;
4070 
4071 			if (compatible_keys(target_hist_data, hist_data, n_keys))
4072 				return hist_data;
4073 		}
4074 	}
4075 
4076 	return NULL;
4077 }
4078 
4079 static struct trace_event_file *event_file(struct trace_array *tr,
4080 					   char *system, char *event_name)
4081 {
4082 	struct trace_event_file *file;
4083 
4084 	file = __find_event_file(tr, system, event_name);
4085 	if (!file)
4086 		return ERR_PTR(-EINVAL);
4087 
4088 	return file;
4089 }
4090 
4091 static struct hist_field *
4092 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
4093 			 char *system, char *event_name, char *field_name)
4094 {
4095 	struct hist_field *event_var;
4096 	char *synthetic_name;
4097 
4098 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
4099 	if (!synthetic_name)
4100 		return ERR_PTR(-ENOMEM);
4101 
4102 	strcpy(synthetic_name, "synthetic_");
4103 	strcat(synthetic_name, field_name);
4104 
4105 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
4106 
4107 	kfree(synthetic_name);
4108 
4109 	return event_var;
4110 }
4111 
4112 /**
4113  * create_field_var_hist - Automatically create a histogram and var for a field
4114  * @target_hist_data: The target hist trigger
4115  * @subsys_name: Optional subsystem name
4116  * @event_name: Optional event name
4117  * @field_name: The name of the field (and the resulting variable)
4118  *
4119  * Hist trigger actions fetch data from variables, not directly from
4120  * events.  However, for convenience, users are allowed to directly
4121  * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
4124  * If a user specifies a field on an event that isn't the event the
4125  * histogram currently being defined (the target event histogram), the
4126  * only way that can be accomplished is if a new hist trigger is
4127  * created and the field variable defined on that.
4128  *
4129  * This function creates a new histogram compatible with the target
4130  * event (meaning a histogram with the same key as the target
4131  * histogram), and creates a variable for the specified field, but
4132  * with 'synthetic_' prepended to the variable name in order to avoid
4133  * collision with normal field variables.
4134  *
4135  * Return: The variable created for the field.
4136  */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_field *event_var = ERR_PTR(-EINVAL);
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target.  We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it.  target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	/* Scratch buffer for building the new trigger command string */
	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	/* var_hist keeps its own copy of the command for later teardown */
	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
				      "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* Record the new hist so it can be unregistered on removal */
	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}
4256 
4257 static struct hist_field *
4258 find_target_event_var(struct hist_trigger_data *hist_data,
4259 		      char *subsys_name, char *event_name, char *var_name)
4260 {
4261 	struct trace_event_file *file = hist_data->event_file;
4262 	struct hist_field *hist_field = NULL;
4263 
4264 	if (subsys_name) {
4265 		struct trace_event_call *call;
4266 
4267 		if (!event_name)
4268 			return NULL;
4269 
4270 		call = file->event_call;
4271 
4272 		if (strcmp(subsys_name, call->class->system) != 0)
4273 			return NULL;
4274 
4275 		if (strcmp(event_name, trace_event_name(call)) != 0)
4276 			return NULL;
4277 	}
4278 
4279 	hist_field = find_var_field(hist_data, var_name);
4280 
4281 	return hist_field;
4282 }
4283 
4284 static inline void __update_field_vars(struct tracing_map_elt *elt,
4285 				       struct ring_buffer_event *rbe,
4286 				       void *rec,
4287 				       struct field_var **field_vars,
4288 				       unsigned int n_field_vars,
4289 				       unsigned int field_var_str_start)
4290 {
4291 	struct hist_elt_data *elt_data = elt->private_data;
4292 	unsigned int i, j, var_idx;
4293 	u64 var_val;
4294 
4295 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
4296 		struct field_var *field_var = field_vars[i];
4297 		struct hist_field *var = field_var->var;
4298 		struct hist_field *val = field_var->val;
4299 
4300 		var_val = val->fn(val, elt, rbe, rec);
4301 		var_idx = var->var.idx;
4302 
4303 		if (val->flags & HIST_FIELD_FL_STRING) {
4304 			char *str = elt_data->field_var_str[j++];
4305 			char *val_str = (char *)(uintptr_t)var_val;
4306 
4307 			strscpy(str, val_str, STR_VAR_LEN_MAX);
4308 			var_val = (u64)(uintptr_t)str;
4309 		}
4310 		tracing_map_set_var(elt, var_idx, var_val);
4311 	}
4312 }
4313 
/*
 * Update all of this trigger's field variables from the current event
 * record; their string storage starts at offset 0 of the element's
 * field_var_str array (see __update_field_vars()).
 */
static void update_field_vars(struct hist_trigger_data *hist_data,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *rec)
{
	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
			    hist_data->n_field_vars, 0);
}
4322 
/*
 * Update the trigger's save_vars from the current event record.  The
 * string storage offset starts after the field-var strings, hence
 * n_field_var_str.  The unused key/data/var_ref_vals parameters keep
 * the common save_track_data callback signature — presumably required
 * by the action dispatch code elsewhere in this file.
 */
static void save_track_data_vars(struct hist_trigger_data *hist_data,
				 struct tracing_map_elt *elt, void *rec,
				 struct ring_buffer_event *rbe, void *key,
				 struct action_data *data, u64 *var_ref_vals)
{
	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
			    hist_data->n_save_vars, hist_data->n_field_var_str);
}
4331 
4332 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
4333 				     struct trace_event_file *file,
4334 				     char *name, int size, const char *type)
4335 {
4336 	struct hist_field *var;
4337 	int idx;
4338 
4339 	if (find_var(hist_data, file, name) && !hist_data->remove) {
4340 		var = ERR_PTR(-EINVAL);
4341 		goto out;
4342 	}
4343 
4344 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
4345 	if (!var) {
4346 		var = ERR_PTR(-ENOMEM);
4347 		goto out;
4348 	}
4349 
4350 	idx = tracing_map_add_var(hist_data->map);
4351 	if (idx < 0) {
4352 		kfree(var);
4353 		var = ERR_PTR(-EINVAL);
4354 		goto out;
4355 	}
4356 
4357 	var->flags = HIST_FIELD_FL_VAR;
4358 	var->var.idx = idx;
4359 	var->var.hist_data = var->hist_data = hist_data;
4360 	var->size = size;
4361 	var->var.name = kstrdup(name, GFP_KERNEL);
4362 	var->type = kstrdup(type, GFP_KERNEL);
4363 	if (!var->var.name || !var->type) {
4364 		kfree(var->var.name);
4365 		kfree(var->type);
4366 		kfree(var);
4367 		var = ERR_PTR(-ENOMEM);
4368 	}
4369  out:
4370 	return var;
4371 }
4372 
4373 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
4374 					  struct trace_event_file *file,
4375 					  char *field_name)
4376 {
4377 	struct hist_field *val = NULL, *var = NULL;
4378 	unsigned long flags = HIST_FIELD_FL_VAR;
4379 	struct trace_array *tr = file->tr;
4380 	struct field_var *field_var;
4381 	int ret = 0;
4382 
4383 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
4384 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
4385 		ret = -EINVAL;
4386 		goto err;
4387 	}
4388 
4389 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
4390 	if (IS_ERR(val)) {
4391 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
4392 		ret = PTR_ERR(val);
4393 		goto err;
4394 	}
4395 
4396 	var = create_var(hist_data, file, field_name, val->size, val->type);
4397 	if (IS_ERR(var)) {
4398 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
4399 		kfree(val);
4400 		ret = PTR_ERR(var);
4401 		goto err;
4402 	}
4403 
4404 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
4405 	if (!field_var) {
4406 		kfree(val);
4407 		kfree(var);
4408 		ret =  -ENOMEM;
4409 		goto err;
4410 	}
4411 
4412 	field_var->var = var;
4413 	field_var->val = val;
4414  out:
4415 	return field_var;
4416  err:
4417 	field_var = ERR_PTR(ret);
4418 	goto out;
4419 }
4420 
4421 /**
4422  * create_target_field_var - Automatically create a variable for a field
4423  * @target_hist_data: The target hist trigger
4424  * @subsys_name: Optional subsystem name
4425  * @event_name: Optional event name
4426  * @var_name: The name of the field (and the resulting variable)
4427  *
4428  * Hist trigger actions fetch data from variables, not directly from
4429  * events.  However, for convenience, users are allowed to directly
4430  * specify an event field in an action, which will be automatically
4431  * converted into a variable on their behalf.
 *
4433  * This function creates a field variable with the name var_name on
4434  * the hist trigger currently being defined on the target event.  If
4435  * subsys_name and event_name are specified, this function simply
4436  * verifies that they do in fact match the target event subsystem and
4437  * event name.
4438  *
4439  * Return: The variable created for the field.
4440  */
4441 static struct field_var *
4442 create_target_field_var(struct hist_trigger_data *target_hist_data,
4443 			char *subsys_name, char *event_name, char *var_name)
4444 {
4445 	struct trace_event_file *file = target_hist_data->event_file;
4446 
4447 	if (subsys_name) {
4448 		struct trace_event_call *call;
4449 
4450 		if (!event_name)
4451 			return NULL;
4452 
4453 		call = file->event_call;
4454 
4455 		if (strcmp(subsys_name, call->class->system) != 0)
4456 			return NULL;
4457 
4458 		if (strcmp(event_name, trace_event_name(call)) != 0)
4459 			return NULL;
4460 	}
4461 
4462 	return create_field_var(target_hist_data, file, var_name);
4463 }
4464 
4465 static bool check_track_val_max(u64 track_val, u64 var_val)
4466 {
4467 	if (var_val <= track_val)
4468 		return false;
4469 
4470 	return true;
4471 }
4472 
4473 static bool check_track_val_changed(u64 track_val, u64 var_val)
4474 {
4475 	if (var_val == track_val)
4476 		return false;
4477 
4478 	return true;
4479 }
4480 
4481 static u64 get_track_val(struct hist_trigger_data *hist_data,
4482 			 struct tracing_map_elt *elt,
4483 			 struct action_data *data)
4484 {
4485 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
4486 	u64 track_val;
4487 
4488 	track_val = tracing_map_read_var(elt, track_var_idx);
4489 
4490 	return track_val;
4491 }
4492 
4493 static void save_track_val(struct hist_trigger_data *hist_data,
4494 			   struct tracing_map_elt *elt,
4495 			   struct action_data *data, u64 var_val)
4496 {
4497 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
4498 
4499 	tracing_map_set_var(elt, track_var_idx, var_val);
4500 }
4501 
4502 static void save_track_data(struct hist_trigger_data *hist_data,
4503 			    struct tracing_map_elt *elt, void *rec,
4504 			    struct ring_buffer_event *rbe, void *key,
4505 			    struct action_data *data, u64 *var_ref_vals)
4506 {
4507 	if (data->track_data.save_data)
4508 		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
4509 }
4510 
4511 static bool check_track_val(struct tracing_map_elt *elt,
4512 			    struct action_data *data,
4513 			    u64 var_val)
4514 {
4515 	struct hist_trigger_data *hist_data;
4516 	u64 track_val;
4517 
4518 	hist_data = data->track_data.track_var->hist_data;
4519 	track_val = get_track_val(hist_data, elt, data);
4520 
4521 	return data->track_data.check_val(track_val, var_val);
4522 }
4523 
4524 #ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Conditional-snapshot callback, invoked from tracing_snapshot_cond()
 * with tr->max_lock held.  Re-checks the tracked value and, if the
 * snapshot should proceed, records the triggering value, key and comm
 * so track_data_snapshot_print() can display them later.
 *
 * Returns true if the snapshot should actually be taken.
 */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	/* called with tr->max_lock held */
	struct track_data *track_data = tr->cond_snapshot->cond_data;
	struct hist_elt_data *elt_data, *track_elt_data;
	struct snapshot_context *context = cond_data;
	struct action_data *action;
	u64 track_val;

	if (!track_data)
		return false;

	action = track_data->action_data;

	track_val = get_track_val(track_data->hist_data, context->elt,
				  track_data->action_data);

	/* Only snapshot if the value still beats the last one we saved */
	if (!action->track_data.check_val(track_data->track_val, track_val))
		return false;

	track_data->track_val = track_val;
	memcpy(track_data->key, context->key, track_data->key_len);

	elt_data = context->elt->private_data;
	track_elt_data = track_data->elt.private_data;
	if (elt_data->comm)
		/*
		 * NOTE(review): strncpy() doesn't NUL-terminate if the
		 * source fills TASK_COMM_LEN - presumably elt_data->comm
		 * is always terminated; confirm against save_comm().
		 */
		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);

	track_data->updated = true;

	return true;
}
4557 
4558 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
4559 				     struct tracing_map_elt *elt, void *rec,
4560 				     struct ring_buffer_event *rbe, void *key,
4561 				     struct action_data *data,
4562 				     u64 *var_ref_vals)
4563 {
4564 	struct trace_event_file *file = hist_data->event_file;
4565 	struct snapshot_context context;
4566 
4567 	context.elt = elt;
4568 	context.key = key;
4569 
4570 	tracing_snapshot_cond(file->tr, &context);
4571 }
4572 
4573 static void hist_trigger_print_key(struct seq_file *m,
4574 				   struct hist_trigger_data *hist_data,
4575 				   void *key,
4576 				   struct tracing_map_elt *elt);
4577 
4578 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
4579 {
4580 	unsigned int i;
4581 
4582 	if (!hist_data->n_actions)
4583 		return NULL;
4584 
4585 	for (i = 0; i < hist_data->n_actions; i++) {
4586 		struct action_data *data = hist_data->actions[i];
4587 
4588 		if (data->action == ACTION_SNAPSHOT)
4589 			return data;
4590 	}
4591 
4592 	return NULL;
4593 }
4594 
4595 static void track_data_snapshot_print(struct seq_file *m,
4596 				      struct hist_trigger_data *hist_data)
4597 {
4598 	struct trace_event_file *file = hist_data->event_file;
4599 	struct track_data *track_data;
4600 	struct action_data *action;
4601 
4602 	track_data = tracing_cond_snapshot_data(file->tr);
4603 	if (!track_data)
4604 		return;
4605 
4606 	if (!track_data->updated)
4607 		return;
4608 
4609 	action = snapshot_action(hist_data);
4610 	if (!action)
4611 		return;
4612 
4613 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
4614 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
4615 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
4616 		   action->track_data.var_str, track_data->track_val);
4617 
4618 	seq_puts(m, "\ttriggered by event with key: ");
4619 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
4620 	seq_putc(m, '\n');
4621 }
4622 #else
/* !CONFIG_TRACER_SNAPSHOT stub: conditional snapshots never fire. */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	return false;
}
/* !CONFIG_TRACER_SNAPSHOT stub: the snapshot save action is a no-op. */
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals) {}
/* !CONFIG_TRACER_SNAPSHOT stub: nothing to print. */
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data) {}
4634 #endif /* CONFIG_TRACER_SNAPSHOT */
4635 
4636 static void track_data_print(struct seq_file *m,
4637 			     struct hist_trigger_data *hist_data,
4638 			     struct tracing_map_elt *elt,
4639 			     struct action_data *data)
4640 {
4641 	u64 track_val = get_track_val(hist_data, elt, data);
4642 	unsigned int i, save_var_idx;
4643 
4644 	if (data->handler == HANDLER_ONMAX)
4645 		seq_printf(m, "\n\tmax: %10llu", track_val);
4646 	else if (data->handler == HANDLER_ONCHANGE)
4647 		seq_printf(m, "\n\tchanged: %10llu", track_val);
4648 
4649 	if (data->action == ACTION_SNAPSHOT)
4650 		return;
4651 
4652 	for (i = 0; i < hist_data->n_save_vars; i++) {
4653 		struct hist_field *save_val = hist_data->save_vars[i]->val;
4654 		struct hist_field *save_var = hist_data->save_vars[i]->var;
4655 		u64 val;
4656 
4657 		save_var_idx = save_var->var.idx;
4658 
4659 		val = tracing_map_read_var(elt, save_var_idx);
4660 
4661 		if (save_val->flags & HIST_FIELD_FL_STRING) {
4662 			seq_printf(m, "  %s: %-32s", save_var->var.name,
4663 				   (char *)(uintptr_t)(val));
4664 		} else
4665 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
4666 	}
4667 }
4668 
4669 static void ontrack_action(struct hist_trigger_data *hist_data,
4670 			   struct tracing_map_elt *elt, void *rec,
4671 			   struct ring_buffer_event *rbe, void *key,
4672 			   struct action_data *data, u64 *var_ref_vals)
4673 {
4674 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
4675 
4676 	if (check_track_val(elt, data, var_val)) {
4677 		save_track_val(hist_data, elt, data, var_val);
4678 		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
4679 	}
4680 }
4681 
4682 static void action_data_destroy(struct action_data *data)
4683 {
4684 	unsigned int i;
4685 
4686 	lockdep_assert_held(&event_mutex);
4687 
4688 	kfree(data->action_name);
4689 
4690 	for (i = 0; i < data->n_params; i++)
4691 		kfree(data->params[i]);
4692 
4693 	if (data->synth_event)
4694 		data->synth_event->ref--;
4695 
4696 	kfree(data->synth_event_name);
4697 
4698 	kfree(data);
4699 }
4700 
/*
 * Tear down an onmax()/onchange() action: destroy the tracking
 * variable, disable and free any conditional snapshot owned by this
 * trigger, then free the action data itself.
 */
static void track_data_destroy(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;

	destroy_hist_field(data->track_data.track_var, 0);

	if (data->action == ACTION_SNAPSHOT) {
		struct track_data *track_data;

		track_data = tracing_cond_snapshot_data(file->tr);
		/* Only tear down the snapshot if this trigger owns it */
		if (track_data && track_data->hist_data == hist_data) {
			tracing_snapshot_cond_disable(file->tr);
			track_data_free(track_data);
		}
	}

	kfree(data->track_data.var_str);

	action_data_destroy(data);
}
4722 
4723 static int action_create(struct hist_trigger_data *hist_data,
4724 			 struct action_data *data);
4725 
/*
 * Set up the tracking side of an onmax()/onchange() handler: resolve
 * the tracked "$var" on the target event, create a variable reference
 * to it, create the internal "__max"/"__change" variable that remembers
 * the last tracked value, then create the action itself.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int track_data_create(struct hist_trigger_data *hist_data,
			     struct action_data *data)
{
	struct hist_field *var_field, *ref_field, *track_var = NULL;
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	char *track_data_var_str;
	int ret = 0;

	track_data_var_str = data->track_data.var_str;
	if (track_data_var_str[0] != '$') {
		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
		return -EINVAL;
	}
	track_data_var_str++;	/* skip the '$' prefix */

	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
	if (!var_field) {
		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
		return -EINVAL;
	}

	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
	if (!ref_field)
		return -ENOMEM;

	data->track_data.var_ref = ref_field;

	/* track_var stays NULL (IS_ERR(NULL) is false) for other handlers */
	if (data->handler == HANDLER_ONMAX)
		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}

	if (data->handler == HANDLER_ONCHANGE)
		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}
	data->track_data.track_var = track_var;

	ret = action_create(hist_data, data);
 out:
	return ret;
}
4775 
4776 static int parse_action_params(struct trace_array *tr, char *params,
4777 			       struct action_data *data)
4778 {
4779 	char *param, *saved_param;
4780 	bool first_param = true;
4781 	int ret = 0;
4782 
4783 	while (params) {
4784 		if (data->n_params >= SYNTH_FIELDS_MAX) {
4785 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
4786 			goto out;
4787 		}
4788 
4789 		param = strsep(&params, ",");
4790 		if (!param) {
4791 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
4792 			ret = -EINVAL;
4793 			goto out;
4794 		}
4795 
4796 		param = strstrip(param);
4797 		if (strlen(param) < 2) {
4798 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
4799 			ret = -EINVAL;
4800 			goto out;
4801 		}
4802 
4803 		saved_param = kstrdup(param, GFP_KERNEL);
4804 		if (!saved_param) {
4805 			ret = -ENOMEM;
4806 			goto out;
4807 		}
4808 
4809 		if (first_param && data->use_trace_keyword) {
4810 			data->synth_event_name = saved_param;
4811 			first_param = false;
4812 			continue;
4813 		}
4814 		first_param = false;
4815 
4816 		data->params[data->n_params++] = saved_param;
4817 	}
4818  out:
4819 	return ret;
4820 }
4821 
/*
 * Parse the action clause following an onmax/onchange/onmatch handler,
 * e.g. ".save(...)", ".snapshot()" or ".trace(...)"/".<synth_event>(...)".
 * Fills in data->fn, data->action, the check_val/save_data callbacks and
 * the parsed parameter list.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
			enum handler_id handler)
{
	char *action_name;
	int ret = 0;

	/* skip past the handler portion, up to the '.' */
	strsep(&str, ".");
	if (!str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	action_name = strsep(&str, "(");
	if (!action_name || !str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	if (str_has_prefix(action_name, "save")) {
		char *params = strsep(&str, ")");

		if (!params) {
			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		ret = parse_action_params(tr, params, data);
		if (ret)
			goto out;

		/* save() is only valid with onmax/onchange */
		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_vars;
		data->fn = ontrack_action;
		data->action = ACTION_SAVE;
	} else if (str_has_prefix(action_name, "snapshot")) {
		char *params = strsep(&str, ")");

		/* snapshot() takes no params; str is NULL if ')' is missing */
		if (!str) {
			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
			ret = -EINVAL;
			goto out;
		}

		/* snapshot() is only valid with onmax/onchange */
		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_snapshot;
		data->fn = ontrack_action;
		data->action = ACTION_SNAPSHOT;
	} else {
		/* anything else generates a synthetic event */
		char *params = strsep(&str, ")");

		if (str_has_prefix(action_name, "trace"))
			data->use_trace_keyword = true;

		if (params) {
			ret = parse_action_params(tr, params, data);
			if (ret)
				goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;

		/* onmatch fires the trace directly; others track first */
		if (handler != HANDLER_ONMATCH) {
			data->track_data.save_data = action_trace;
			data->fn = ontrack_action;
		} else
			data->fn = action_trace;

		data->action = ACTION_TRACE;
	}

	data->action_name = kstrdup(action_name, GFP_KERNEL);
	if (!data->action_name) {
		ret = -ENOMEM;
		goto out;
	}

	data->handler = handler;
 out:
	return ret;
}
4926 
/*
 * Parse "onmax($var).<action>(...)" / "onchange($var).<action>(...)"
 * into a new action_data; str points just past the handler's '('.
 *
 * Return: the new action_data, or ERR_PTR() on failure.
 */
static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
					    char *str, enum handler_id handler)
{
	struct action_data *data;
	int ret = -EINVAL;
	char *var_str;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	var_str = strsep(&str, ")");
	if (!var_str || !str) {
		ret = -EINVAL;
		goto free;
	}

	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
	if (!data->track_data.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(hist_data->event_file->tr, str, data, handler);
	if (ret)
		goto free;
 out:
	return data;
 free:
	track_data_destroy(hist_data, data);
	data = ERR_PTR(ret);
	goto out;
}
4960 
/* Free an onmatch() action's match data along with the common action data. */
static void onmatch_destroy(struct action_data *data)
{
	kfree(data->match_data.event);
	kfree(data->match_data.event_system);

	action_data_destroy(data);
}
4968 
4969 static void destroy_field_var(struct field_var *field_var)
4970 {
4971 	if (!field_var)
4972 		return;
4973 
4974 	destroy_hist_field(field_var->var, 0);
4975 	destroy_hist_field(field_var->val, 0);
4976 
4977 	kfree(field_var);
4978 }
4979 
4980 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4981 {
4982 	unsigned int i;
4983 
4984 	for (i = 0; i < hist_data->n_field_vars; i++)
4985 		destroy_field_var(hist_data->field_vars[i]);
4986 }
4987 
4988 static void save_field_var(struct hist_trigger_data *hist_data,
4989 			   struct field_var *field_var)
4990 {
4991 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4992 
4993 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
4994 		hist_data->n_field_var_str++;
4995 }
4996 
4997 
4998 static int check_synth_field(struct synth_event *event,
4999 			     struct hist_field *hist_field,
5000 			     unsigned int field_pos)
5001 {
5002 	struct synth_field *field;
5003 
5004 	if (field_pos >= event->n_fields)
5005 		return -EINVAL;
5006 
5007 	field = event->fields[field_pos];
5008 
5009 	if (strcmp(field->type, hist_field->type) != 0) {
5010 		if (field->size != hist_field->size ||
5011 		    field->is_signed != hist_field->is_signed)
5012 			return -EINVAL;
5013 	}
5014 
5015 	return 0;
5016 }
5017 
5018 static struct hist_field *
5019 trace_action_find_var(struct hist_trigger_data *hist_data,
5020 		      struct action_data *data,
5021 		      char *system, char *event, char *var)
5022 {
5023 	struct trace_array *tr = hist_data->event_file->tr;
5024 	struct hist_field *hist_field;
5025 
5026 	var++; /* skip '$' */
5027 
5028 	hist_field = find_target_event_var(hist_data, system, event, var);
5029 	if (!hist_field) {
5030 		if (!system && data->handler == HANDLER_ONMATCH) {
5031 			system = data->match_data.event_system;
5032 			event = data->match_data.event;
5033 		}
5034 
5035 		hist_field = find_event_var(hist_data, system, event, var);
5036 	}
5037 
5038 	if (!hist_field)
5039 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
5040 
5041 	return hist_field;
5042 }
5043 
/*
 * Resolve a non-'$' action parameter by automatically creating a
 * variable for the named field, either on the target event or, failing
 * that, via a new hist trigger on the other event.
 *
 * Return: the variable's hist_field, NULL or an ERR_PTR() on failure.
 */
static struct hist_field *
trace_action_create_field_var(struct hist_trigger_data *hist_data,
			      struct action_data *data, char *system,
			      char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the
	 * currently being defined).  This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
		 */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		/*
		 * At this point, we're looking at a field on another
		 * event.  Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}
5093 
5094 static int trace_action_create(struct hist_trigger_data *hist_data,
5095 			       struct action_data *data)
5096 {
5097 	struct trace_array *tr = hist_data->event_file->tr;
5098 	char *event_name, *param, *system = NULL;
5099 	struct hist_field *hist_field, *var_ref;
5100 	unsigned int i;
5101 	unsigned int field_pos = 0;
5102 	struct synth_event *event;
5103 	char *synth_event_name;
5104 	int var_ref_idx, ret = 0;
5105 
5106 	lockdep_assert_held(&event_mutex);
5107 
5108 	if (data->use_trace_keyword)
5109 		synth_event_name = data->synth_event_name;
5110 	else
5111 		synth_event_name = data->action_name;
5112 
5113 	event = find_synth_event(synth_event_name);
5114 	if (!event) {
5115 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
5116 		return -EINVAL;
5117 	}
5118 
5119 	event->ref++;
5120 
5121 	for (i = 0; i < data->n_params; i++) {
5122 		char *p;
5123 
5124 		p = param = kstrdup(data->params[i], GFP_KERNEL);
5125 		if (!param) {
5126 			ret = -ENOMEM;
5127 			goto err;
5128 		}
5129 
5130 		system = strsep(&param, ".");
5131 		if (!param) {
5132 			param = (char *)system;
5133 			system = event_name = NULL;
5134 		} else {
5135 			event_name = strsep(&param, ".");
5136 			if (!param) {
5137 				kfree(p);
5138 				ret = -EINVAL;
5139 				goto err;
5140 			}
5141 		}
5142 
5143 		if (param[0] == '$')
5144 			hist_field = trace_action_find_var(hist_data, data,
5145 							   system, event_name,
5146 							   param);
5147 		else
5148 			hist_field = trace_action_create_field_var(hist_data,
5149 								   data,
5150 								   system,
5151 								   event_name,
5152 								   param);
5153 
5154 		if (!hist_field) {
5155 			kfree(p);
5156 			ret = -EINVAL;
5157 			goto err;
5158 		}
5159 
5160 		if (check_synth_field(event, hist_field, field_pos) == 0) {
5161 			var_ref = create_var_ref(hist_data, hist_field,
5162 						 system, event_name);
5163 			if (!var_ref) {
5164 				kfree(p);
5165 				ret = -ENOMEM;
5166 				goto err;
5167 			}
5168 
5169 			var_ref_idx = find_var_ref_idx(hist_data, var_ref);
5170 			if (WARN_ON(var_ref_idx < 0)) {
5171 				ret = var_ref_idx;
5172 				goto err;
5173 			}
5174 
5175 			data->var_ref_idx[i] = var_ref_idx;
5176 
5177 			field_pos++;
5178 			kfree(p);
5179 			continue;
5180 		}
5181 
5182 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
5183 		kfree(p);
5184 		ret = -EINVAL;
5185 		goto err;
5186 	}
5187 
5188 	if (field_pos != event->n_fields) {
5189 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
5190 		ret = -EINVAL;
5191 		goto err;
5192 	}
5193 
5194 	data->synth_event = event;
5195  out:
5196 	return ret;
5197  err:
5198 	event->ref--;
5199 
5200 	goto out;
5201 }
5202 
/*
 * Create a fully-parsed action, dispatching on its type: trace()
 * creates the synthetic event hookup, snapshot() allocates and enables
 * the conditional snapshot, and save() creates a field variable for
 * each parameter to be saved.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	struct track_data *track_data;
	struct field_var *field_var;
	unsigned int i;
	char *param;
	int ret = 0;

	if (data->action == ACTION_TRACE)
		return trace_action_create(hist_data, data);

	if (data->action == ACTION_SNAPSHOT) {
		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
		if (IS_ERR(track_data)) {
			ret = PTR_ERR(track_data);
			goto out;
		}

		ret = tracing_snapshot_cond_enable(file->tr, track_data,
						   cond_snapshot_update);
		if (ret)
			track_data_free(track_data);

		goto out;
	}

	if (data->action == ACTION_SAVE) {
		/* only one save() action is allowed per hist trigger */
		if (hist_data->n_save_vars) {
			ret = -EEXIST;
			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
			goto out;
		}

		for (i = 0; i < data->n_params; i++) {
			param = kstrdup(data->params[i], GFP_KERNEL);
			if (!param) {
				ret = -ENOMEM;
				goto out;
			}

			field_var = create_target_field_var(hist_data, NULL, NULL, param);
			if (IS_ERR(field_var)) {
				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
					 errpos(param));
				ret = PTR_ERR(field_var);
				kfree(param);
				goto out;
			}

			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
			if (field_var->val->flags & HIST_FIELD_FL_STRING)
				hist_data->n_save_var_str++;
			kfree(param);
		}
	}
 out:
	return ret;
}
5264 
/*
 * onmatch() handler setup: all the work is in creating the action
 * itself; the match data was validated at parse time.
 */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
5270 
/*
 * Parse "onmatch(subsys.event).<action>(...)" into a new action_data,
 * verifying that the matched event actually exists.
 *
 * Return: the new action_data, or ERR_PTR() on failure.
 */
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
	char *match_event, *match_event_system;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	match_event = strsep(&str, ")");
	if (!match_event || !str) {
		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
		goto free;
	}

	match_event_system = strsep(&match_event, ".");
	if (!match_event) {
		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
		goto free;
	}

	/* the matched system.event must be a real event */
	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
		goto free;
	}

	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
	if (!data->match_data.event) {
		ret = -ENOMEM;
		goto free;
	}

	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
	if (!data->match_data.event_system) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
	if (ret)
		goto free;
 out:
	return data;
 free:
	onmatch_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}
5320 
5321 static int create_hitcount_val(struct hist_trigger_data *hist_data)
5322 {
5323 	hist_data->fields[HITCOUNT_IDX] =
5324 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
5325 	if (!hist_data->fields[HITCOUNT_IDX])
5326 		return -ENOMEM;
5327 
5328 	hist_data->n_vals++;
5329 	hist_data->n_fields++;
5330 
5331 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
5332 		return -EINVAL;
5333 
5334 	return 0;
5335 }
5336 
5337 static int __create_val_field(struct hist_trigger_data *hist_data,
5338 			      unsigned int val_idx,
5339 			      struct trace_event_file *file,
5340 			      char *var_name, char *field_str,
5341 			      unsigned long flags)
5342 {
5343 	struct hist_field *hist_field;
5344 	int ret = 0;
5345 
5346 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
5347 	if (IS_ERR(hist_field)) {
5348 		ret = PTR_ERR(hist_field);
5349 		goto out;
5350 	}
5351 
5352 	hist_data->fields[val_idx] = hist_field;
5353 
5354 	++hist_data->n_vals;
5355 	++hist_data->n_fields;
5356 
5357 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
5358 		ret = -EINVAL;
5359  out:
5360 	return ret;
5361 }
5362 
/* Create a plain (non-variable) value field from field_str at val_idx. */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
5373 
5374 static int create_var_field(struct hist_trigger_data *hist_data,
5375 			    unsigned int val_idx,
5376 			    struct trace_event_file *file,
5377 			    char *var_name, char *expr_str)
5378 {
5379 	struct trace_array *tr = hist_data->event_file->tr;
5380 	unsigned long flags = 0;
5381 
5382 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
5383 		return -EINVAL;
5384 
5385 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
5386 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
5387 		return -EINVAL;
5388 	}
5389 
5390 	flags |= HIST_FIELD_FL_VAR;
5391 	hist_data->n_vars++;
5392 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
5393 		return -EINVAL;
5394 
5395 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
5396 }
5397 
/*
 * Create the value fields from the trigger's "vals=" string.  The
 * implicit hitcount val always occupies index 0; explicit "hitcount"
 * entries in the list are skipped since it already exists, so explicit
 * vals are installed starting at index 1 (j).
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j = 1;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (strcmp(field_str, "hitcount") == 0)
			continue;

		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}

	/* leftover input means too many vals, unless it's just "hitcount" */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
5432 
/*
 * Create a key field at key_idx/key_offset from field_str, which is
 * either the special "stacktrace" key or an arbitrary expression.
 * Variable references are not allowed in keys.
 *
 * Return: the u64-aligned size of the key on success, negative error
 * code on failure.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
	} else {
		hist_field = parse_expr(hist_data, file, field_str, flags,
					NULL, 0);
		if (IS_ERR(hist_field)) {
			ret = PTR_ERR(hist_field);
			goto out;
		}

		/* keys can't reference other variables */
		if (field_has_hist_vars(hist_field, 0))	{
			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
			destroy_hist_field(hist_field, 0);
			ret = -EINVAL;
			goto out;
		}

		key_size = hist_field->size;
	}

	hist_data->fields[key_idx] = hist_field;

	/* keys are laid out u64-aligned within the compound key */
	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;

	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
5495 
5496 static int create_key_fields(struct hist_trigger_data *hist_data,
5497 			     struct trace_event_file *file)
5498 {
5499 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
5500 	char *fields_str, *field_str;
5501 	int ret = -EINVAL;
5502 
5503 	fields_str = hist_data->attrs->keys_str;
5504 	if (!fields_str)
5505 		goto out;
5506 
5507 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
5508 		field_str = strsep(&fields_str, ",");
5509 		if (!field_str)
5510 			break;
5511 		ret = create_key_field(hist_data, i, key_offset,
5512 				       file, field_str);
5513 		if (ret < 0)
5514 			goto out;
5515 		key_offset += ret;
5516 	}
5517 	if (fields_str) {
5518 		ret = -EINVAL;
5519 		goto out;
5520 	}
5521 	ret = 0;
5522  out:
5523 	return ret;
5524 }
5525 
5526 static int create_var_fields(struct hist_trigger_data *hist_data,
5527 			     struct trace_event_file *file)
5528 {
5529 	unsigned int i, j = hist_data->n_vals;
5530 	int ret = 0;
5531 
5532 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
5533 
5534 	for (i = 0; i < n_vars; i++) {
5535 		char *var_name = hist_data->attrs->var_defs.name[i];
5536 		char *expr = hist_data->attrs->var_defs.expr[i];
5537 
5538 		ret = create_var_field(hist_data, j++, file, var_name, expr);
5539 		if (ret)
5540 			goto out;
5541 	}
5542  out:
5543 	return ret;
5544 }
5545 
5546 static void free_var_defs(struct hist_trigger_data *hist_data)
5547 {
5548 	unsigned int i;
5549 
5550 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
5551 		kfree(hist_data->attrs->var_defs.name[i]);
5552 		kfree(hist_data->attrs->var_defs.expr[i]);
5553 	}
5554 
5555 	hist_data->attrs->var_defs.n_vars = 0;
5556 }
5557 
/*
 * Parse the trigger's "var=expr[,var2=expr2...]" assignment strings
 * into the attrs->var_defs name/expr arrays.
 *
 * Note: strsep() consumes the assignment_str[] buffers in place; the
 * name and expression substrings are kstrdup'ed so the parsed
 * definitions outlive the original strings.
 *
 * Returns 0 on success, -EINVAL on a malformed assignment or too many
 * variables, -ENOMEM on allocation failure.  On error, definitions
 * parsed so far are freed via free_var_defs().
 */
static int parse_var_defs(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *s, *str, *var_name, *field_str;
	unsigned int i, j, n_vars = 0;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
		str = hist_data->attrs->assignment_str[i];
		/* one assignment string may hold several comma-separated defs */
		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
			field_str = strsep(&str, ",");
			if (!field_str)
				break;

			var_name = strsep(&field_str, "=");
			/* both a name and an expression are required */
			if (!var_name || !field_str) {
				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
					 errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			if (n_vars == TRACING_MAP_VARS_MAX) {
				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			s = kstrdup(var_name, GFP_KERNEL);
			if (!s) {
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.name[n_vars] = s;

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				/* don't leak the name if the expr dup fails */
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.expr[n_vars++] = s;

			/* keep the count current so free_var_defs() frees only valid slots */
			hist_data->attrs->var_defs.n_vars = n_vars;
		}
	}

	return ret;
 free:
	free_var_defs(hist_data);

	return ret;
}
5611 
/*
 * Parse variable definitions and create all histogram fields (values,
 * variables, then keys).  The parsed definitions are only needed during
 * creation, so they are freed on both success and failure paths.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);

	if (!ret)
		ret = create_val_fields(hist_data, file);

	if (!ret)
		ret = create_var_fields(hist_data, file);

	if (!ret)
		ret = create_key_fields(hist_data, file);

	free_var_defs(hist_data);

	return ret;
}
5637 
5638 static int is_descending(struct trace_array *tr, const char *str)
5639 {
5640 	if (!str)
5641 		return 0;
5642 
5643 	if (strcmp(str, "descending") == 0)
5644 		return 1;
5645 
5646 	if (strcmp(str, "ascending") == 0)
5647 		return 0;
5648 
5649 	hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
5650 
5651 	return -EINVAL;
5652 }
5653 
/*
 * Parse the trigger's "sort=" attribute into hist_data->sort_keys[].
 *
 * The sort string is a comma-separated list of
 * "field[.ascending|.descending]" specs.  "hitcount" maps to sort key
 * index 0; other fields are matched by name against the non-variable
 * fields.  The sort index must skip over variable fields (they have no
 * map slot), hence the separate 'k' counter alongside the fields[]
 * index 'j' below.
 *
 * Returns 0 on success or a negative error code for an empty/unknown
 * field, a bad modifier, or too many sort fields.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (!*field_str) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* leftover text after the last slot means too many sort fields */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
			ret = -EINVAL;
			break;
		}

		/* split "name.modifier" -- field_str now points at the modifier */
		field_name = strsep(&field_str, ".");
		if (!field_name || !*field_name) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* hitcount is always sort field index 0 (sort_key zeroed) */
		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(tr, field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/* j walks fields[], k counts only sortable (non-VAR) fields */
		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(tr, field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		/* the inner loop ran off the end: no field by that name */
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
			break;
		}
	}

	/* i is the number of sort keys actually parsed */
	hist_data->n_sort_keys = i;
 out:
	return ret;
}
5740 
5741 static void destroy_actions(struct hist_trigger_data *hist_data)
5742 {
5743 	unsigned int i;
5744 
5745 	for (i = 0; i < hist_data->n_actions; i++) {
5746 		struct action_data *data = hist_data->actions[i];
5747 
5748 		if (data->handler == HANDLER_ONMATCH)
5749 			onmatch_destroy(data);
5750 		else if (data->handler == HANDLER_ONMAX ||
5751 			 data->handler == HANDLER_ONCHANGE)
5752 			track_data_destroy(hist_data, data);
5753 		else
5754 			kfree(data);
5755 	}
5756 }
5757 
5758 static int parse_actions(struct hist_trigger_data *hist_data)
5759 {
5760 	struct trace_array *tr = hist_data->event_file->tr;
5761 	struct action_data *data;
5762 	unsigned int i;
5763 	int ret = 0;
5764 	char *str;
5765 	int len;
5766 
5767 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
5768 		str = hist_data->attrs->action_str[i];
5769 
5770 		if ((len = str_has_prefix(str, "onmatch("))) {
5771 			char *action_str = str + len;
5772 
5773 			data = onmatch_parse(tr, action_str);
5774 			if (IS_ERR(data)) {
5775 				ret = PTR_ERR(data);
5776 				break;
5777 			}
5778 		} else if ((len = str_has_prefix(str, "onmax("))) {
5779 			char *action_str = str + len;
5780 
5781 			data = track_data_parse(hist_data, action_str,
5782 						HANDLER_ONMAX);
5783 			if (IS_ERR(data)) {
5784 				ret = PTR_ERR(data);
5785 				break;
5786 			}
5787 		} else if ((len = str_has_prefix(str, "onchange("))) {
5788 			char *action_str = str + len;
5789 
5790 			data = track_data_parse(hist_data, action_str,
5791 						HANDLER_ONCHANGE);
5792 			if (IS_ERR(data)) {
5793 				ret = PTR_ERR(data);
5794 				break;
5795 			}
5796 		} else {
5797 			ret = -EINVAL;
5798 			break;
5799 		}
5800 
5801 		hist_data->actions[hist_data->n_actions++] = data;
5802 	}
5803 
5804 	return ret;
5805 }
5806 
5807 static int create_actions(struct hist_trigger_data *hist_data)
5808 {
5809 	struct action_data *data;
5810 	unsigned int i;
5811 	int ret = 0;
5812 
5813 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
5814 		data = hist_data->actions[i];
5815 
5816 		if (data->handler == HANDLER_ONMATCH) {
5817 			ret = onmatch_create(hist_data, data);
5818 			if (ret)
5819 				break;
5820 		} else if (data->handler == HANDLER_ONMAX ||
5821 			   data->handler == HANDLER_ONCHANGE) {
5822 			ret = track_data_create(hist_data, data);
5823 			if (ret)
5824 				break;
5825 		} else {
5826 			ret = -EINVAL;
5827 			break;
5828 		}
5829 	}
5830 
5831 	return ret;
5832 }
5833 
5834 static void print_actions(struct seq_file *m,
5835 			  struct hist_trigger_data *hist_data,
5836 			  struct tracing_map_elt *elt)
5837 {
5838 	unsigned int i;
5839 
5840 	for (i = 0; i < hist_data->n_actions; i++) {
5841 		struct action_data *data = hist_data->actions[i];
5842 
5843 		if (data->action == ACTION_SNAPSHOT)
5844 			continue;
5845 
5846 		if (data->handler == HANDLER_ONMAX ||
5847 		    data->handler == HANDLER_ONCHANGE)
5848 			track_data_print(m, hist_data, elt, data);
5849 	}
5850 }
5851 
5852 static void print_action_spec(struct seq_file *m,
5853 			      struct hist_trigger_data *hist_data,
5854 			      struct action_data *data)
5855 {
5856 	unsigned int i;
5857 
5858 	if (data->action == ACTION_SAVE) {
5859 		for (i = 0; i < hist_data->n_save_vars; i++) {
5860 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
5861 			if (i < hist_data->n_save_vars - 1)
5862 				seq_puts(m, ",");
5863 		}
5864 	} else if (data->action == ACTION_TRACE) {
5865 		if (data->use_trace_keyword)
5866 			seq_printf(m, "%s", data->synth_event_name);
5867 		for (i = 0; i < data->n_params; i++) {
5868 			if (i || data->use_trace_keyword)
5869 				seq_puts(m, ",");
5870 			seq_printf(m, "%s", data->params[i]);
5871 		}
5872 	}
5873 }
5874 
5875 static void print_track_data_spec(struct seq_file *m,
5876 				  struct hist_trigger_data *hist_data,
5877 				  struct action_data *data)
5878 {
5879 	if (data->handler == HANDLER_ONMAX)
5880 		seq_puts(m, ":onmax(");
5881 	else if (data->handler == HANDLER_ONCHANGE)
5882 		seq_puts(m, ":onchange(");
5883 	seq_printf(m, "%s", data->track_data.var_str);
5884 	seq_printf(m, ").%s(", data->action_name);
5885 
5886 	print_action_spec(m, hist_data, data);
5887 
5888 	seq_puts(m, ")");
5889 }
5890 
5891 static void print_onmatch_spec(struct seq_file *m,
5892 			       struct hist_trigger_data *hist_data,
5893 			       struct action_data *data)
5894 {
5895 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
5896 		   data->match_data.event);
5897 
5898 	seq_printf(m, "%s(", data->action_name);
5899 
5900 	print_action_spec(m, hist_data, data);
5901 
5902 	seq_puts(m, ")");
5903 }
5904 
5905 static bool actions_match(struct hist_trigger_data *hist_data,
5906 			  struct hist_trigger_data *hist_data_test)
5907 {
5908 	unsigned int i, j;
5909 
5910 	if (hist_data->n_actions != hist_data_test->n_actions)
5911 		return false;
5912 
5913 	for (i = 0; i < hist_data->n_actions; i++) {
5914 		struct action_data *data = hist_data->actions[i];
5915 		struct action_data *data_test = hist_data_test->actions[i];
5916 		char *action_name, *action_name_test;
5917 
5918 		if (data->handler != data_test->handler)
5919 			return false;
5920 		if (data->action != data_test->action)
5921 			return false;
5922 
5923 		if (data->n_params != data_test->n_params)
5924 			return false;
5925 
5926 		for (j = 0; j < data->n_params; j++) {
5927 			if (strcmp(data->params[j], data_test->params[j]) != 0)
5928 				return false;
5929 		}
5930 
5931 		if (data->use_trace_keyword)
5932 			action_name = data->synth_event_name;
5933 		else
5934 			action_name = data->action_name;
5935 
5936 		if (data_test->use_trace_keyword)
5937 			action_name_test = data_test->synth_event_name;
5938 		else
5939 			action_name_test = data_test->action_name;
5940 
5941 		if (strcmp(action_name, action_name_test) != 0)
5942 			return false;
5943 
5944 		if (data->handler == HANDLER_ONMATCH) {
5945 			if (strcmp(data->match_data.event_system,
5946 				   data_test->match_data.event_system) != 0)
5947 				return false;
5948 			if (strcmp(data->match_data.event,
5949 				   data_test->match_data.event) != 0)
5950 				return false;
5951 		} else if (data->handler == HANDLER_ONMAX ||
5952 			   data->handler == HANDLER_ONCHANGE) {
5953 			if (strcmp(data->track_data.var_str,
5954 				   data_test->track_data.var_str) != 0)
5955 				return false;
5956 		}
5957 	}
5958 
5959 	return true;
5960 }
5961 
5962 
5963 static void print_actions_spec(struct seq_file *m,
5964 			       struct hist_trigger_data *hist_data)
5965 {
5966 	unsigned int i;
5967 
5968 	for (i = 0; i < hist_data->n_actions; i++) {
5969 		struct action_data *data = hist_data->actions[i];
5970 
5971 		if (data->handler == HANDLER_ONMATCH)
5972 			print_onmatch_spec(m, hist_data, data);
5973 		else if (data->handler == HANDLER_ONMAX ||
5974 			 data->handler == HANDLER_ONCHANGE)
5975 			print_track_data_spec(m, hist_data, data);
5976 	}
5977 }
5978 
5979 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5980 {
5981 	unsigned int i;
5982 
5983 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5984 		kfree(hist_data->field_var_hists[i]->cmd);
5985 		kfree(hist_data->field_var_hists[i]);
5986 	}
5987 }
5988 
/*
 * Free a hist_trigger_data and everything it owns.  Tolerates NULL,
 * like kfree().  Note that create_hist_data() NULLs out ->attrs before
 * calling this on its error path, so attrs ownership stays with the
 * caller in that case.
 */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	if (!hist_data)
		return;

	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);

	destroy_actions(hist_data);
	destroy_field_vars(hist_data);
	destroy_field_var_hists(hist_data);

	kfree(hist_data);
}
6004 
/*
 * Register every histogram field with the underlying tracing_map.
 *
 * Keys get a comparison function matching their type (none for
 * stacktraces, string compare for string fields, numeric otherwise);
 * non-key non-variable fields become sums; variable fields get a
 * per-element variable slot whose index is saved back into the
 * hist_field.
 *
 * Returns 0 on success or a negative error from the tracing_map layer.
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx = 0;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (!field)
				/* no event field backing it (e.g. an expression) */
				cmp_fn = tracing_map_cmp_num(hist_field->size,
							     hist_field->is_signed);
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);
		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;

		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			idx = tracing_map_add_var(map);
			if (idx < 0)
				return idx;
			/* remember where this variable lives in the map element */
			hist_field->var.idx = idx;
			hist_field->var.hist_data = hist_data;
		}
	}

	return 0;
}
6049 
6050 static struct hist_trigger_data *
6051 create_hist_data(unsigned int map_bits,
6052 		 struct hist_trigger_attrs *attrs,
6053 		 struct trace_event_file *file,
6054 		 bool remove)
6055 {
6056 	const struct tracing_map_ops *map_ops = NULL;
6057 	struct hist_trigger_data *hist_data;
6058 	int ret = 0;
6059 
6060 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
6061 	if (!hist_data)
6062 		return ERR_PTR(-ENOMEM);
6063 
6064 	hist_data->attrs = attrs;
6065 	hist_data->remove = remove;
6066 	hist_data->event_file = file;
6067 
6068 	ret = parse_actions(hist_data);
6069 	if (ret)
6070 		goto free;
6071 
6072 	ret = create_hist_fields(hist_data, file);
6073 	if (ret)
6074 		goto free;
6075 
6076 	ret = create_sort_keys(hist_data);
6077 	if (ret)
6078 		goto free;
6079 
6080 	map_ops = &hist_trigger_elt_data_ops;
6081 
6082 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
6083 					    map_ops, hist_data);
6084 	if (IS_ERR(hist_data->map)) {
6085 		ret = PTR_ERR(hist_data->map);
6086 		hist_data->map = NULL;
6087 		goto free;
6088 	}
6089 
6090 	ret = create_tracing_map_fields(hist_data);
6091 	if (ret)
6092 		goto free;
6093  out:
6094 	return hist_data;
6095  free:
6096 	hist_data->attrs = NULL;
6097 
6098 	destroy_hist_data(hist_data);
6099 
6100 	hist_data = ERR_PTR(ret);
6101 
6102 	goto out;
6103 }
6104 
/*
 * Update a map element for the current event record: recompute each
 * val field and either accumulate it as a sum or, for variable
 * fields, store it in the element's variable slot.  Key fields that
 * are also variables get stored too, then any field variables are
 * refreshed.  Runs on the event hot path.
 */
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
				    struct tracing_map_elt *elt, void *rec,
				    struct ring_buffer_event *rbe,
				    u64 *var_ref_vals)
{
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	u64 hist_val;

	elt_data = elt->private_data;
	/* stash the resolved variable reference values for the actions */
	elt_data->var_ref_vals = var_ref_vals;

	for_each_hist_val_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
			continue;
		}
		tracing_map_update_sum(elt, i, hist_val);
	}

	for_each_hist_key_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
		}
	}

	update_field_vars(hist_data, elt, rbe, rec);
}
6140 
/*
 * Copy one key field's value into the compound key buffer at the
 * field's offset.  String keys are limited to their actual length
 * (dynamic strings encode the length in the upper 16 bits of the
 * 32-bit meta word at the field's record offset) and copied with
 * strncpy(), clamped so the zero-initialized slot stays
 * NUL-terminated; everything else is a fixed-size memcpy().
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;

		strncpy(compound_key + key_field->offset, (char *)key, size);
	} else
		memcpy(compound_key + key_field->offset, key, size);
}
6165 
6166 static void
6167 hist_trigger_actions(struct hist_trigger_data *hist_data,
6168 		     struct tracing_map_elt *elt, void *rec,
6169 		     struct ring_buffer_event *rbe, void *key,
6170 		     u64 *var_ref_vals)
6171 {
6172 	struct action_data *data;
6173 	unsigned int i;
6174 
6175 	for (i = 0; i < hist_data->n_actions; i++) {
6176 		data = hist_data->actions[i];
6177 		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
6178 	}
6179 }
6180 
/*
 * Per-event hot path for a hist trigger: build the (possibly compound)
 * key from the key fields, resolve any variable references, insert or
 * look up the map element, update its sums/variables, and finally run
 * any attached actions.  Bails out early if required variable
 * references can't be resolved yet.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct tracing_map_elt *elt = NULL;
	struct hist_field *key_field;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			memset(entries, 0, HIST_STACKTRACE_SIZE);
			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
					 HIST_STACKTRACE_SKIP);
			key = entries;
		} else {
			field_contents = key_field->fn(key_field, elt, rbe, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* string keys are always copied into the key buffer */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	/*
	 * NOTE(review): the false/true flag to resolve_var_refs()
	 * presumably selects which class of refs must resolve at each
	 * stage -- confirm against resolve_var_refs() itself.
	 */
	if (hist_data->n_var_refs &&
	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
		return;

	elt = tracing_map_insert(hist_data->map, key);
	if (!elt)
		return;

	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);

	/* actions run only once all their variable refs resolve */
	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
}
6234 
6235 static void hist_trigger_stacktrace_print(struct seq_file *m,
6236 					  unsigned long *stacktrace_entries,
6237 					  unsigned int max_entries)
6238 {
6239 	char str[KSYM_SYMBOL_LEN];
6240 	unsigned int spaces = 8;
6241 	unsigned int i;
6242 
6243 	for (i = 0; i < max_entries; i++) {
6244 		if (!stacktrace_entries[i])
6245 			return;
6246 
6247 		seq_printf(m, "%*c", 1 + spaces, ' ');
6248 		sprint_symbol(str, stacktrace_entries[i]);
6249 		seq_printf(m, "%s\n", str);
6250 	}
6251 }
6252 
/*
 * Print one histogram entry's key as "{ name: value, ... }", formatting
 * each key field according to its modifier flags (.hex, .sym,
 * .sym-offset, .execname, .syscall, .log2, stacktrace, string, or raw
 * decimal).
 */
static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* key fields start at index n_vals, so the first one gets no comma */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* the comm was saved in the element's private data */
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			/* stacktraces span lines; suppress the trailing space */
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");
}
6333 
6334 static void hist_trigger_entry_print(struct seq_file *m,
6335 				     struct hist_trigger_data *hist_data,
6336 				     void *key,
6337 				     struct tracing_map_elt *elt)
6338 {
6339 	const char *field_name;
6340 	unsigned int i;
6341 
6342 	hist_trigger_print_key(m, hist_data, key, elt);
6343 
6344 	seq_printf(m, " hitcount: %10llu",
6345 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
6346 
6347 	for (i = 1; i < hist_data->n_vals; i++) {
6348 		field_name = hist_field_name(hist_data->fields[i], 0);
6349 
6350 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
6351 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
6352 			continue;
6353 
6354 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
6355 			seq_printf(m, "  %s: %10llx", field_name,
6356 				   tracing_map_read_sum(elt, i));
6357 		} else {
6358 			seq_printf(m, "  %s: %10llu", field_name,
6359 				   tracing_map_read_sum(elt, i));
6360 		}
6361 	}
6362 
6363 	print_actions(m, hist_data, elt);
6364 
6365 	seq_puts(m, "\n");
6366 }
6367 
6368 static int print_entries(struct seq_file *m,
6369 			 struct hist_trigger_data *hist_data)
6370 {
6371 	struct tracing_map_sort_entry **sort_entries = NULL;
6372 	struct tracing_map *map = hist_data->map;
6373 	int i, n_entries;
6374 
6375 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
6376 					     hist_data->n_sort_keys,
6377 					     &sort_entries);
6378 	if (n_entries < 0)
6379 		return n_entries;
6380 
6381 	for (i = 0; i < n_entries; i++)
6382 		hist_trigger_entry_print(m, hist_data,
6383 					 sort_entries[i]->key,
6384 					 sort_entries[i]->elt);
6385 
6386 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
6387 
6388 	return n_entries;
6389 }
6390 
6391 static void hist_trigger_show(struct seq_file *m,
6392 			      struct event_trigger_data *data, int n)
6393 {
6394 	struct hist_trigger_data *hist_data;
6395 	int n_entries;
6396 
6397 	if (n > 0)
6398 		seq_puts(m, "\n\n");
6399 
6400 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
6401 	data->ops->print(m, data->ops, data);
6402 	seq_puts(m, "#\n\n");
6403 
6404 	hist_data = data->private_data;
6405 	n_entries = print_entries(m, hist_data);
6406 	if (n_entries < 0)
6407 		n_entries = 0;
6408 
6409 	track_data_snapshot_print(m, hist_data);
6410 
6411 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
6412 		   (u64)atomic64_read(&hist_data->map->hits),
6413 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
6414 }
6415 
6416 static int hist_show(struct seq_file *m, void *v)
6417 {
6418 	struct event_trigger_data *data;
6419 	struct trace_event_file *event_file;
6420 	int n = 0, ret = 0;
6421 
6422 	mutex_lock(&event_mutex);
6423 
6424 	event_file = event_file_data(m->private);
6425 	if (unlikely(!event_file)) {
6426 		ret = -ENODEV;
6427 		goto out_unlock;
6428 	}
6429 
6430 	list_for_each_entry(data, &event_file->triggers, list) {
6431 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
6432 			hist_trigger_show(m, data, n++);
6433 	}
6434 
6435  out_unlock:
6436 	mutex_unlock(&event_mutex);
6437 
6438 	return ret;
6439 }
6440 
/*
 * open() for the tracefs "hist" file: honor tracefs lockdown, then set
 * up the single-shot seq_file with hist_show().
 */
static int event_hist_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return single_open(file, hist_show, file);
}
6451 
/* File operations for the per-event tracefs "hist" file */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
6458 
6459 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
6460 {
6461 	const char *field_name = hist_field_name(hist_field, 0);
6462 
6463 	if (hist_field->var.name)
6464 		seq_printf(m, "%s=", hist_field->var.name);
6465 
6466 	if (hist_field->flags & HIST_FIELD_FL_CPU)
6467 		seq_puts(m, "cpu");
6468 	else if (field_name) {
6469 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
6470 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
6471 			seq_putc(m, '$');
6472 		seq_printf(m, "%s", field_name);
6473 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
6474 		seq_puts(m, "common_timestamp");
6475 
6476 	if (hist_field->flags) {
6477 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
6478 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
6479 			const char *flags = get_hist_field_flags(hist_field);
6480 
6481 			if (flags)
6482 				seq_printf(m, ".%s", flags);
6483 		}
6484 	}
6485 }
6486 
/*
 * Reconstruct and print the full trigger command line for this
 * histogram: "hist:[name:]keys=...:vals=...[:vars][:sort=...]:size=..
 * [:clock=...][actions][ if filter] [paused|active]".  The output must
 * round-trip: it's what users see in the "hist" and "trigger" files.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		/* key fields start at n_vals, so the first key gets no comma */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		/* variables print in their own section below */
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			/* map the VAR-skipping sort index back to a fields[] index */
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
6595 
6596 static int event_hist_trigger_init(struct event_trigger_ops *ops,
6597 				   struct event_trigger_data *data)
6598 {
6599 	struct hist_trigger_data *hist_data = data->private_data;
6600 
6601 	if (!data->ref && hist_data->attrs->name)
6602 		save_named_trigger(hist_data->attrs->name, data);
6603 
6604 	data->ref++;
6605 
6606 	return 0;
6607 }
6608 
6609 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
6610 {
6611 	struct trace_event_file *file;
6612 	unsigned int i;
6613 	char *cmd;
6614 	int ret;
6615 
6616 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
6617 		file = hist_data->field_var_hists[i]->hist_data->event_file;
6618 		cmd = hist_data->field_var_hists[i]->cmd;
6619 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
6620 					      "!hist", "hist", cmd);
6621 	}
6622 }
6623 
/*
 * Drop a reference on a hist trigger; on the last reference, tear down
 * the trigger and its histogram data.  Counterpart of
 * event_hist_trigger_init().
 */
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Unregister the shared name before freeing the trigger. */
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		/* Release variables, auto-created field-var hists, then
		 * the histogram data itself - in that order. */
		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
}
6646 
/*
 * Ops for an ordinary (unnamed, or name-owning) hist trigger: this
 * instance owns its hist_trigger_data, which event_hist_trigger_free()
 * destroys on the last reference.
 */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};
6653 
/*
 * init op for a hist trigger that attaches to an existing named
 * trigger: take a ref on this instance, register it under the shared
 * name, then forward a ref to the data-owning named trigger.
 */
static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}
6665 
/*
 * free op for a named-trigger reference: drop the ref that was
 * forwarded to the data-owning named trigger first, then release this
 * instance, freeing it on its last reference.
 */
static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}
6680 
/*
 * Ops for a hist trigger that shares the hist_trigger_data of an
 * existing named trigger (see hist_register_trigger(), which installs
 * these when attaching to named_data).
 */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};
6687 
/*
 * Every hist trigger starts out with the plain ops; cmd and param are
 * unused.  hist_register_trigger() later swaps in
 * event_hist_trigger_named_ops when the trigger attaches to an
 * existing named trigger.
 */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
6693 
/*
 * Wipe the histogram map of a trigger.  A named trigger is paused
 * around the clear so the shared map isn't updated while being wiped;
 * tracepoint_synchronize_unregister() lets any in-flight probe callers
 * drain before the map is cleared.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
6708 
6709 static bool compatible_field(struct ftrace_event_field *field,
6710 			     struct ftrace_event_field *test_field)
6711 {
6712 	if (field == test_field)
6713 		return true;
6714 	if (field == NULL || test_field == NULL)
6715 		return false;
6716 	if (strcmp(field->name, test_field->name) != 0)
6717 		return false;
6718 	if (strcmp(field->type, test_field->type) != 0)
6719 		return false;
6720 	if (field->size != test_field->size)
6721 		return false;
6722 	if (field->is_signed != test_field->is_signed)
6723 		return false;
6724 
6725 	return true;
6726 }
6727 
/*
 * Decide whether two hist triggers count as "the same" trigger for
 * registration/removal purposes: same shape (val/field/sort-key
 * counts), field-by-field compatibility (flags, underlying event
 * field, offset, size, signedness, variable name), identical sort
 * keys, matching actions, and (unless @ignore_filter) matching filter
 * strings.  @named_data, when set, restricts matches to that named
 * trigger or to references to it.
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	/* An unnamed trigger never matches a named one. */
	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		/* Both must have a filter, or neither. */
		if ((data->filter_str && !data_test->filter_str) ||
		   (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		/* Variable names: both unset, or both set and equal. */
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	/*
	 * Safe: the earlier both-or-neither check guarantees
	 * data_test->filter_str is non-NULL here.
	 */
	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}
6798 
/*
 * Register a hist trigger on @file.  Returns the number of triggers
 * registered (1) on success, 0 when an existing matching trigger was
 * paused/continued/cleared instead, or a negative error.
 */
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	struct trace_array *tr = file->tr;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			/*
			 * A new reference to an existing named trigger
			 * must describe the same histogram (filters may
			 * differ per event).
			 */
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* First instance of a named trigger - create it from scratch. */
	if (hist_data->attrs->name && !named_data)
		goto new;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			/*
			 * Same trigger already on this file: pause,
			 * continue or clear modify it in place; a plain
			 * re-registration is an error.
			 */
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else {
				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
				ret = -EEXIST;
			}
			goto out;
		}
	}
 new:
	/* cont/clear only make sense against an existing trigger. */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		/* Share the named trigger's data and switch to named ops. */
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_time_stamp_abs(file->tr, true);
	}

	/*
	 * When attached to named_data, private_data now points at the
	 * shared copy, so our own hist_data is no longer needed.
	 */
	if (named_data)
		destroy_hist_data(hist_data);

	ret++;	/* account the newly registered trigger */
 out:
	return ret;
}
6883 
6884 static int hist_trigger_enable(struct event_trigger_data *data,
6885 			       struct trace_event_file *file)
6886 {
6887 	int ret = 0;
6888 
6889 	list_add_tail_rcu(&data->list, &file->triggers);
6890 
6891 	update_cond_flag(file);
6892 
6893 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
6894 		list_del_rcu(&data->list);
6895 		update_cond_flag(file);
6896 		ret--;
6897 	}
6898 
6899 	return ret;
6900 }
6901 
6902 static bool have_hist_trigger_match(struct event_trigger_data *data,
6903 				    struct trace_event_file *file)
6904 {
6905 	struct hist_trigger_data *hist_data = data->private_data;
6906 	struct event_trigger_data *test, *named_data = NULL;
6907 	bool match = false;
6908 
6909 	lockdep_assert_held(&event_mutex);
6910 
6911 	if (hist_data->attrs->name)
6912 		named_data = find_named_trigger(hist_data->attrs->name);
6913 
6914 	list_for_each_entry(test, &file->triggers, list) {
6915 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6916 			if (hist_trigger_match(data, test, named_data, false)) {
6917 				match = true;
6918 				break;
6919 			}
6920 		}
6921 	}
6922 
6923 	return match;
6924 }
6925 
6926 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6927 				    struct trace_event_file *file)
6928 {
6929 	struct hist_trigger_data *hist_data = data->private_data;
6930 	struct event_trigger_data *test, *named_data = NULL;
6931 
6932 	lockdep_assert_held(&event_mutex);
6933 
6934 	if (hist_data->attrs->name)
6935 		named_data = find_named_trigger(hist_data->attrs->name);
6936 
6937 	list_for_each_entry(test, &file->triggers, list) {
6938 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6939 			if (!hist_trigger_match(data, test, named_data, false))
6940 				continue;
6941 			hist_data = test->private_data;
6942 			if (check_var_refs(hist_data))
6943 				return true;
6944 			break;
6945 		}
6946 	}
6947 
6948 	return false;
6949 }
6950 
/*
 * Unregister the hist trigger on @file matching @data: unlink it from
 * the trigger list, disable the event, and drop the trigger's ref via
 * its free op.
 */
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/*
	 * 'test' is only a valid trigger here when the loop above found
	 * a match (unregistered == true) - don't touch it otherwise.
	 */
	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		/*
		 * Turn absolute timestamps back off for a non-remove
		 * caller, or once the removal actually happened.
		 */
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}
6984 
6985 static bool hist_file_check_refs(struct trace_event_file *file)
6986 {
6987 	struct hist_trigger_data *hist_data;
6988 	struct event_trigger_data *test;
6989 
6990 	lockdep_assert_held(&event_mutex);
6991 
6992 	list_for_each_entry(test, &file->triggers, list) {
6993 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6994 			hist_data = test->private_data;
6995 			if (check_var_refs(hist_data))
6996 				return true;
6997 		}
6998 	}
6999 
7000 	return false;
7001 }
7002 
/*
 * Remove every hist trigger from @file, unless any of the file's
 * histogram variables are still referenced elsewhere, in which case
 * nothing is removed.
 */
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			/*
			 * If this event is a synthetic event, release
			 * the ref taken on it at trigger creation.
			 */
			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
7034 
/*
 * Parse and apply a 'hist' trigger command written to an event's
 * trigger file.  @glob carries a '!' prefix for removal; @param is the
 * trigger spec, optionally followed by ' if <filter>'.  Returns 0 on
 * success or a negative error.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Remember the command text for hist error reporting. */
	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		/* 'if' must be a standalone word: whitespace before... */
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		/* ...and a filter expression must follow it. */
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		/* Split the string in place: trigger spec / filter. */
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* hist_data takes ownership of attrs from here on. */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		/* Can't remove a histogram whose variables are in use. */
		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	/*
	 * Attached to an existing named trigger: the shared data is
	 * already fully set up, just enable this instance.
	 */
	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	/* Pin the synthetic event, if that's what this event is. */
	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0)
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}
7205 
/*
 * The 'hist' trigger command: commands are parsed by
 * event_hist_trigger_func() and registered/unregistered through
 * hist_register_trigger()/hist_unregister_trigger().
 */
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
7217 
7218 __init int register_trigger_hist_cmd(void)
7219 {
7220 	int ret;
7221 
7222 	ret = register_event_command(&trigger_hist_cmd);
7223 	WARN_ON(ret < 0);
7224 
7225 	return ret;
7226 }
7227 
/*
 * enable_hist/disable_hist trigger body: pause or unpause every hist
 * trigger on the target event file, depending on enable_data->enable.
 */
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}
7245 
7246 static void
7247 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
7248 			  struct ring_buffer_event *event)
7249 {
7250 	if (!data->count)
7251 		return;
7252 
7253 	if (data->count != -1)
7254 		(data->count)--;
7255 
7256 	hist_enable_trigger(data, rec, event);
7257 }
7258 
/*
 * The four ops variants handed out by hist_enable_get_trigger_ops():
 * enable vs disable, each with or without a firing-count limit.  The
 * enable/disable distinction lives in enable_trigger_data, so the
 * enable and disable pairs share the same trigger functions.
 */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
7286 
7287 static struct event_trigger_ops *
7288 hist_enable_get_trigger_ops(char *cmd, char *param)
7289 {
7290 	struct event_trigger_ops *ops;
7291 	bool enable;
7292 
7293 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
7294 
7295 	if (enable)
7296 		ops = param ? &hist_enable_count_trigger_ops :
7297 			&hist_enable_trigger_ops;
7298 	else
7299 		ops = param ? &hist_disable_count_trigger_ops :
7300 			&hist_disable_trigger_ops;
7301 
7302 	return ops;
7303 }
7304 
/*
 * Remove all enable_hist/disable_hist triggers from @file.
 *
 * NOTE(review): update_cond_flag() is called before
 * trace_event_trigger_enable_disable() here, the reverse of the order
 * used in hist_unreg_all() - confirm the ordering is intentional.
 */
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
7319 
/*
 * The enable_hist and disable_hist commands: both share trigger type
 * ETT_HIST_ENABLE and the generic event-enable registration helpers;
 * only the command name differs, which hist_enable_get_trigger_ops()
 * uses to pick enable vs disable behavior.
 */
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
7341 
/* Undo registration of both commands (used on partial-failure rollback). */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
7347 
7348 __init int register_trigger_hist_enable_disable_cmds(void)
7349 {
7350 	int ret;
7351 
7352 	ret = register_event_command(&trigger_hist_enable_cmd);
7353 	if (WARN_ON(ret < 0))
7354 		return ret;
7355 	ret = register_event_command(&trigger_hist_disable_cmd);
7356 	if (WARN_ON(ret < 0))
7357 		unregister_trigger_hist_enable_disable_cmds();
7358 
7359 	return ret;
7360 }
7361 
/*
 * Boot-time init: register the synthetic-event dynevent ops and create
 * the 'synthetic_events' tracefs control file.
 *
 * NOTE(review): the 'err' label's message mentions the tracefs entry
 * even when reached via a tracing_init_dentry() failure - slightly
 * misleading, but harmless.
 */
static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err) {
		pr_warn("Could not register synth_event_ops\n");
		return err;
	}

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_hist_init);
7395