1 /*
2  * trace_events_hist - trace event hist triggers
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
15  */
16 
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
23 
24 #include "tracing_map.h"
25 #include "trace.h"
26 
struct hist_field;

/* Fetches a hist field's value from a raw trace record */
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);

#define HIST_FIELD_OPERANDS_MAX	2

/*
 * A single key or value field of a hist trigger.  For compound fields
 * (e.g. .log2), operands[] holds the sub-field(s) the fn operates on.
 */
struct hist_field {
	struct ftrace_event_field	*field;	/* NULL for hitcount/stacktrace */
	unsigned long			flags;	/* HIST_FIELD_FL_* modifiers */
	hist_field_fn_t			fn;	/* value-retrieval function */
	unsigned int			size;	/* field size in bytes */
	unsigned int			offset;	/* offset into compound key (keys only) */
	unsigned int                    is_signed;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
};
42 
/*
 * Value fn for fields that have no retrievable per-record value
 * (e.g. stacktrace keys): always returns 0.
 */
static u64 hist_field_none(struct hist_field *field, void *event)
{
	return 0;
}
47 
/*
 * Value fn for the implicit hitcount value: contributes 1 per event,
 * summed by tracing_map_update_sum().
 */
static u64 hist_field_counter(struct hist_field *field, void *event)
{
	return 1;
}
52 
53 static u64 hist_field_string(struct hist_field *hist_field, void *event)
54 {
55 	char *addr = (char *)(event + hist_field->field->offset);
56 
57 	return (u64)(unsigned long)addr;
58 }
59 
60 static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
61 {
62 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
63 	int str_loc = str_item & 0xffff;
64 	char *addr = (char *)(event + str_loc);
65 
66 	return (u64)(unsigned long)addr;
67 }
68 
69 static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
70 {
71 	char **addr = (char **)(event + hist_field->field->offset);
72 
73 	return (u64)(unsigned long)*addr;
74 }
75 
/*
 * Value fn for the .log2 modifier: returns ceil(log2()) of the operand
 * sub-field's value, bucketing values into power-of-2 ranges.
 */
static u64 hist_field_log2(struct hist_field *hist_field, void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}
84 
/*
 * Generate a fetch function per numeric type: read a value of the given
 * type at the field's record offset and widen it to u64.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
101 
/* Iterate over all fields: values first [0, n_vals), then keys */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

/* Iterate over value fields only (index 0 is always hitcount) */
#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

/* Iterate over key fields only: they follow the values in fields[] */
#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5	/* skip trigger-internal frames */

#define HITCOUNT_IDX		0	/* hitcount is always fields[0] */
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
117 
/* Per-field type and display-modifier flags */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,	/* implicit event counter */
	HIST_FIELD_FL_KEY		= 1 << 1,	/* field is a hash key */
	HIST_FIELD_FL_STRING		= 1 << 2,	/* string-valued field */
	HIST_FIELD_FL_HEX		= 1 << 3,	/* .hex display */
	HIST_FIELD_FL_SYM		= 1 << 4,	/* .sym: symbol name */
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,	/* .sym-offset display */
	HIST_FIELD_FL_EXECNAME		= 1 << 6,	/* .execname on common_pid */
	HIST_FIELD_FL_SYSCALL		= 1 << 7,	/* .syscall name display */
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,	/* key is a stacktrace */
	HIST_FIELD_FL_LOG2		= 1 << 9,	/* .log2 bucketing */
};
130 
/* Parsed attributes of a hist trigger command string */
struct hist_trigger_attrs {
	char		*keys_str;	/* "keys=..." clause (required) */
	char		*vals_str;	/* "vals=..." clause, if any */
	char		*sort_key_str;	/* "sort=..." clause, if any */
	char		*name;		/* "name=..." clause, if any */
	bool		pause;		/* trigger created paused */
	bool		cont;		/* continue a paused trigger */
	bool		clear;		/* clear an existing trigger */
	unsigned int	map_bits;	/* log2 of requested map size; 0 = default */
};
141 
/*
 * Full state of one hist trigger: its fields (values at indices
 * [0, n_vals), keys at [n_vals, n_fields)), sort spec, and the
 * backing tracing_map.
 */
struct hist_trigger_data {
	struct hist_field               *fields[TRACING_MAP_FIELDS_MAX];
	unsigned int			n_vals;		/* includes hitcount at [0] */
	unsigned int			n_keys;
	unsigned int			n_fields;	/* n_vals + n_keys */
	unsigned int			key_size;	/* total compound-key size */
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
};
154 
/*
 * Return the display name of a hist field, never NULL.  For compound
 * fields (log2) the name comes from the operand; the level parameter
 * bounds that recursion to one step (operands can't themselves be
 * compound here), so callers pass 0.
 */
static const char *hist_field_name(struct hist_field *field,
				   unsigned int level)
{
	const char *field_name = "";

	/* refuse to recurse more than one level deep */
	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2)
		field_name = hist_field_name(field->operands[0], ++level);

	/* normalize a NULL name to "" so callers can strcmp/print safely */
	if (field_name == NULL)
		field_name = "";

	return field_name;
}
173 
174 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
175 {
176 	hist_field_fn_t fn = NULL;
177 
178 	switch (field_size) {
179 	case 8:
180 		if (field_is_signed)
181 			fn = hist_field_s64;
182 		else
183 			fn = hist_field_u64;
184 		break;
185 	case 4:
186 		if (field_is_signed)
187 			fn = hist_field_s32;
188 		else
189 			fn = hist_field_u32;
190 		break;
191 	case 2:
192 		if (field_is_signed)
193 			fn = hist_field_s16;
194 		else
195 			fn = hist_field_u16;
196 		break;
197 	case 1:
198 		if (field_is_signed)
199 			fn = hist_field_s8;
200 		else
201 			fn = hist_field_u8;
202 		break;
203 	}
204 
205 	return fn;
206 }
207 
208 static int parse_map_size(char *str)
209 {
210 	unsigned long size, map_bits;
211 	int ret;
212 
213 	strsep(&str, "=");
214 	if (!str) {
215 		ret = -EINVAL;
216 		goto out;
217 	}
218 
219 	ret = kstrtoul(str, 0, &size);
220 	if (ret)
221 		goto out;
222 
223 	map_bits = ilog2(roundup_pow_of_two(size));
224 	if (map_bits < TRACING_MAP_BITS_MIN ||
225 	    map_bits > TRACING_MAP_BITS_MAX)
226 		ret = -EINVAL;
227 	else
228 		ret = map_bits;
229  out:
230 	return ret;
231 }
232 
233 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
234 {
235 	if (!attrs)
236 		return;
237 
238 	kfree(attrs->name);
239 	kfree(attrs->sort_key_str);
240 	kfree(attrs->keys_str);
241 	kfree(attrs->vals_str);
242 	kfree(attrs);
243 }
244 
245 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
246 {
247 	struct hist_trigger_attrs *attrs;
248 	int ret = 0;
249 
250 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
251 	if (!attrs)
252 		return ERR_PTR(-ENOMEM);
253 
254 	while (trigger_str) {
255 		char *str = strsep(&trigger_str, ":");
256 
257 		if ((strncmp(str, "key=", strlen("key=")) == 0) ||
258 		    (strncmp(str, "keys=", strlen("keys=")) == 0))
259 			attrs->keys_str = kstrdup(str, GFP_KERNEL);
260 		else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
261 			 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
262 			 (strncmp(str, "values=", strlen("values=")) == 0))
263 			attrs->vals_str = kstrdup(str, GFP_KERNEL);
264 		else if (strncmp(str, "sort=", strlen("sort=")) == 0)
265 			attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
266 		else if (strncmp(str, "name=", strlen("name=")) == 0)
267 			attrs->name = kstrdup(str, GFP_KERNEL);
268 		else if (strcmp(str, "pause") == 0)
269 			attrs->pause = true;
270 		else if ((strcmp(str, "cont") == 0) ||
271 			 (strcmp(str, "continue") == 0))
272 			attrs->cont = true;
273 		else if (strcmp(str, "clear") == 0)
274 			attrs->clear = true;
275 		else if (strncmp(str, "size=", strlen("size=")) == 0) {
276 			int map_bits = parse_map_size(str);
277 
278 			if (map_bits < 0) {
279 				ret = map_bits;
280 				goto free;
281 			}
282 			attrs->map_bits = map_bits;
283 		} else {
284 			ret = -EINVAL;
285 			goto free;
286 		}
287 	}
288 
289 	if (!attrs->keys_str) {
290 		ret = -EINVAL;
291 		goto free;
292 	}
293 
294 	return attrs;
295  free:
296 	destroy_hist_trigger_attrs(attrs);
297 
298 	return ERR_PTR(ret);
299 }
300 
/*
 * Copy a task's comm into the per-element buffer (TASK_COMM_LEN + 1
 * bytes, pre-zeroed at allocation so the result stays NUL-terminated).
 * PID 0 is rendered as "<idle>"; a negative PID should be impossible.
 */
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	memcpy(comm, task->comm, TASK_COMM_LEN);
}
315 
316 static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
317 {
318 	kfree((char *)elt->private_data);
319 }
320 
321 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
322 {
323 	struct hist_trigger_data *hist_data = elt->map->private_data;
324 	struct hist_field *key_field;
325 	unsigned int i;
326 
327 	for_each_hist_key_field(i, hist_data) {
328 		key_field = hist_data->fields[i];
329 
330 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
331 			unsigned int size = TASK_COMM_LEN + 1;
332 
333 			elt->private_data = kzalloc(size, GFP_KERNEL);
334 			if (!elt->private_data)
335 				return -ENOMEM;
336 			break;
337 		}
338 	}
339 
340 	return 0;
341 }
342 
343 static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
344 				       struct tracing_map_elt *from)
345 {
346 	char *comm_from = from->private_data;
347 	char *comm_to = to->private_data;
348 
349 	if (comm_from)
350 		memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
351 }
352 
/*
 * Record the inserting task's comm into the element's buffer the first
 * time the element is claimed (only present for .execname keys).
 */
static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
{
	char *comm = elt->private_data;

	if (comm)
		save_comm(comm, current);
}
360 
/* Map element callbacks used only when an .execname key is present */
static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
	.elt_alloc	= hist_trigger_elt_comm_alloc,
	.elt_copy	= hist_trigger_elt_comm_copy,
	.elt_free	= hist_trigger_elt_comm_free,
	.elt_init	= hist_trigger_elt_comm_init,
};
367 
/*
 * Recursively free a hist field and its operand sub-fields.  The level
 * guard bounds recursion depth (operands are at most one level deep);
 * NULL fields are tolerated.
 */
static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 2)
		return;

	if (!hist_field)
		return;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	kfree(hist_field);
}
384 
385 static struct hist_field *create_hist_field(struct ftrace_event_field *field,
386 					    unsigned long flags)
387 {
388 	struct hist_field *hist_field;
389 
390 	if (field && is_function_field(field))
391 		return NULL;
392 
393 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
394 	if (!hist_field)
395 		return NULL;
396 
397 	if (flags & HIST_FIELD_FL_HITCOUNT) {
398 		hist_field->fn = hist_field_counter;
399 		goto out;
400 	}
401 
402 	if (flags & HIST_FIELD_FL_STACKTRACE) {
403 		hist_field->fn = hist_field_none;
404 		goto out;
405 	}
406 
407 	if (flags & HIST_FIELD_FL_LOG2) {
408 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
409 		hist_field->fn = hist_field_log2;
410 		hist_field->operands[0] = create_hist_field(field, fl);
411 		hist_field->size = hist_field->operands[0]->size;
412 		goto out;
413 	}
414 
415 	if (WARN_ON_ONCE(!field))
416 		goto out;
417 
418 	if (is_string_field(field)) {
419 		flags |= HIST_FIELD_FL_STRING;
420 
421 		if (field->filter_type == FILTER_STATIC_STRING)
422 			hist_field->fn = hist_field_string;
423 		else if (field->filter_type == FILTER_DYN_STRING)
424 			hist_field->fn = hist_field_dynstring;
425 		else
426 			hist_field->fn = hist_field_pstring;
427 	} else {
428 		hist_field->fn = select_value_fn(field->size,
429 						 field->is_signed);
430 		if (!hist_field->fn) {
431 			destroy_hist_field(hist_field, 0);
432 			return NULL;
433 		}
434 	}
435  out:
436 	hist_field->field = field;
437 	hist_field->flags = flags;
438 
439 	return hist_field;
440 }
441 
442 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
443 {
444 	unsigned int i;
445 
446 	for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
447 		if (hist_data->fields[i]) {
448 			destroy_hist_field(hist_data->fields[i], 0);
449 			hist_data->fields[i] = NULL;
450 		}
451 	}
452 }
453 
/*
 * Install the implicit hitcount value at fields[0].  Every histogram
 * has it regardless of the vals= clause.  Returns 0 or -ENOMEM/-EINVAL.
 */
static int create_hitcount_val(struct hist_trigger_data *hist_data)
{
	hist_data->fields[HITCOUNT_IDX] =
		create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
	if (!hist_data->fields[HITCOUNT_IDX])
		return -ENOMEM;

	hist_data->n_vals++;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return 0;
}
468 
/*
 * Parse one vals= entry ("field" or "field.hex") and install the
 * corresponding hist field at fields[val_idx].  field_str is consumed
 * by strsep().  Returns 0 on success or a negative error.
 */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	char *field_name;
	int ret = 0;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	/* split "name.modifier"; .hex is the only modifier for values */
	field_name = strsep(&field_str, ".");
	if (field_str) {
		if (strcmp(field_str, "hex") == 0)
			flags |= HIST_FIELD_FL_HEX;
		else {
			ret = -EINVAL;
			goto out;
		}
	}

	field = trace_find_event_field(file->event_call, field_name);
	if (!field || !field->size) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->fields[val_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[val_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	++hist_data->n_vals;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		ret = -EINVAL;
 out:
	return ret;
}
511 
/*
 * Create all value fields: the implicit hitcount first, then each
 * comma-separated entry of the vals= clause.  "hitcount" entries are
 * skipped since it's already present.  Returns 0 or a negative error.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	/* strip the "vals" prefix, leaving the comma-separated list */
	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	/* i counts parsed entries, j is the next fields[] slot (0 = hitcount) */
	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		if (strcmp(field_str, "hitcount") == 0)
			continue;
		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}
	/* leftover input means too many values, unless it's just "hitcount" */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
547 
/*
 * Parse one keys= entry (a field name with optional modifier, or the
 * special "stacktrace") and install it at fields[key_idx], at offset
 * key_offset within the compound key.  Returns the key's u64-aligned
 * size on success (the caller advances the offset by it), or a
 * negative error.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
	} else {
		char *field_name = strsep(&field_str, ".");

		/* field_str now holds the modifier, if one was given */
		if (field_str) {
			if (strcmp(field_str, "hex") == 0)
				flags |= HIST_FIELD_FL_HEX;
			else if (strcmp(field_str, "sym") == 0)
				flags |= HIST_FIELD_FL_SYM;
			else if (strcmp(field_str, "sym-offset") == 0)
				flags |= HIST_FIELD_FL_SYM_OFFSET;
			else if ((strcmp(field_str, "execname") == 0) &&
				 (strcmp(field_name, "common_pid") == 0))
				flags |= HIST_FIELD_FL_EXECNAME;
			else if (strcmp(field_str, "syscall") == 0)
				flags |= HIST_FIELD_FL_SYSCALL;
			else if (strcmp(field_str, "log2") == 0)
				flags |= HIST_FIELD_FL_LOG2;
			else {
				ret = -EINVAL;
				goto out;
			}
		}

		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			ret = -EINVAL;
			goto out;
		}

		/* string keys get a fixed-size slot regardless of content */
		if (is_string_field(field))
			key_size = MAX_FILTER_STR_VAL;
		else
			key_size = field->size;
	}

	hist_data->fields[key_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[key_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	/* keys are laid out on u64 boundaries within the compound key */
	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;
	hist_data->key_size += key_size;
	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
626 
/*
 * Parse the keys= clause and create each key field, packing them one
 * after another into the compound key (key_offset accumulates the
 * aligned sizes returned by create_key_field()).  Keys occupy fields[]
 * slots starting at n_vals.  Returns 0 or a negative error.
 */
static int create_key_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
	char *fields_str, *field_str;
	int ret = -EINVAL;

	fields_str = hist_data->attrs->keys_str;
	if (!fields_str)
		goto out;

	/* strip the "keys" prefix, leaving the comma-separated list */
	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		ret = create_key_field(hist_data, i, key_offset,
				       file, field_str);
		if (ret < 0)
			goto out;
		key_offset += ret;
	}
	/* leftover input means more keys than TRACING_MAP_KEYS_MAX */
	if (fields_str) {
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
 out:
	return ret;
}
660 
661 static int create_hist_fields(struct hist_trigger_data *hist_data,
662 			      struct trace_event_file *file)
663 {
664 	int ret;
665 
666 	ret = create_val_fields(hist_data, file);
667 	if (ret)
668 		goto out;
669 
670 	ret = create_key_fields(hist_data, file);
671 	if (ret)
672 		goto out;
673 
674 	hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
675  out:
676 	return ret;
677 }
678 
/*
 * Interpret an optional sort-direction modifier.  Returns 1 for
 * "descending", 0 for "ascending" or no modifier at all, and -EINVAL
 * for anything else.
 */
static int is_descending(const char *str)
{
	if (!str || strcmp(str, "ascending") == 0)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	return -EINVAL;
}
692 
/*
 * Parse the sort= clause into hist_data->sort_keys[].  Each entry is
 * "field" or "field.direction"; "hitcount" refers to the implicit
 * counter at index 0.  With no clause, a single default hitcount sort
 * key is used.  Returns 0 or a negative error.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	/* strip the "sort" prefix, leaving the comma-separated list */
	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			/* an empty clause ("sort=") is an error */
			if (i == 0)
				ret = -EINVAL;
			break;
		}

		/* input left after filling the last slot: too many keys */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		/* split "name.direction"; field_str keeps the direction */
		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			/* field_idx stays 0 == HITCOUNT_IDX */
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/* match the name against the non-hitcount fields */
		for (j = 1; j < hist_data->n_fields; j++) {
			hist_field = hist_data->fields[j];
			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = j;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		/* no field matched the requested sort key name */
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			break;
		}
	}
	hist_data->n_sort_keys = i;
 out:
	return ret;
}
770 
/*
 * Tear down a hist trigger: its attrs (if still owned), all fields,
 * the tracing map, and the hist_data itself.
 */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);
	kfree(hist_data);
}
778 
/*
 * Register each hist field with the tracing map: keys get a comparison
 * function matched to their type (none for stacktraces, string compare
 * for strings, numeric compare otherwise); values become sum fields.
 * Returns 0 or the negative error from the map layer.
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);

		} else
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;
	}

	return 0;
}
813 
814 static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
815 {
816 	struct hist_field *key_field;
817 	unsigned int i;
818 
819 	for_each_hist_key_field(i, hist_data) {
820 		key_field = hist_data->fields[i];
821 
822 		if (key_field->flags & HIST_FIELD_FL_EXECNAME)
823 			return true;
824 	}
825 
826 	return false;
827 }
828 
829 static struct hist_trigger_data *
830 create_hist_data(unsigned int map_bits,
831 		 struct hist_trigger_attrs *attrs,
832 		 struct trace_event_file *file)
833 {
834 	const struct tracing_map_ops *map_ops = NULL;
835 	struct hist_trigger_data *hist_data;
836 	int ret = 0;
837 
838 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
839 	if (!hist_data)
840 		return ERR_PTR(-ENOMEM);
841 
842 	hist_data->attrs = attrs;
843 
844 	ret = create_hist_fields(hist_data, file);
845 	if (ret)
846 		goto free;
847 
848 	ret = create_sort_keys(hist_data);
849 	if (ret)
850 		goto free;
851 
852 	if (need_tracing_map_ops(hist_data))
853 		map_ops = &hist_trigger_elt_comm_ops;
854 
855 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
856 					    map_ops, hist_data);
857 	if (IS_ERR(hist_data->map)) {
858 		ret = PTR_ERR(hist_data->map);
859 		hist_data->map = NULL;
860 		goto free;
861 	}
862 
863 	ret = create_tracing_map_fields(hist_data);
864 	if (ret)
865 		goto free;
866 
867 	ret = tracing_map_init(hist_data->map);
868 	if (ret)
869 		goto free;
870 
871 	hist_data->event_file = file;
872  out:
873 	return hist_data;
874  free:
875 	hist_data->attrs = NULL;
876 
877 	destroy_hist_data(hist_data);
878 
879 	hist_data = ERR_PTR(ret);
880 
881 	goto out;
882 }
883 
884 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
885 				    struct tracing_map_elt *elt,
886 				    void *rec)
887 {
888 	struct hist_field *hist_field;
889 	unsigned int i;
890 	u64 hist_val;
891 
892 	for_each_hist_val_field(i, hist_data) {
893 		hist_field = hist_data->fields[i];
894 		hist_val = hist_field->fn(hist_field, rec);
895 		tracing_map_update_sum(elt, i, hist_val);
896 	}
897 }
898 
/*
 * Copy one key field's bytes into the compound key at the field's
 * offset.  For string keys, copy only the actual string length (per
 * the field's filter type) rather than the full fixed-size slot, and
 * clamp so the slot's final byte stays the NUL terminator (the
 * compound key was pre-zeroed by the caller).
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			size = *(u32 *)(rec + field->offset) >> 16;	/* high half = length */
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;
	}

	memcpy(compound_key + key_field->offset, key, size);
}
922 
/*
 * Per-event trigger callback: assemble the key from the record (a
 * compound key when there are multiple keys or a string key), insert
 * or look up the map element, and update its sums.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct stack_trace stacktrace;
	struct hist_field *key_field;
	struct tracing_map_elt *elt;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	/* zero so unused tail bytes of each key slot compare equal */
	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
			stacktrace.entries = entries;
			stacktrace.nr_entries = 0;
			stacktrace.skip = HIST_STACKTRACE_SKIP;

			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
			save_stack_trace(&stacktrace);

			key = entries;
		} else {
			field_contents = key_field->fn(key_field, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* strings always go through the compound key */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	elt = tracing_map_insert(hist_data->map, key);
	if (elt)
		hist_trigger_elt_update(hist_data, elt, rec);
}
971 
972 static void hist_trigger_stacktrace_print(struct seq_file *m,
973 					  unsigned long *stacktrace_entries,
974 					  unsigned int max_entries)
975 {
976 	char str[KSYM_SYMBOL_LEN];
977 	unsigned int spaces = 8;
978 	unsigned int i;
979 
980 	for (i = 0; i < max_entries; i++) {
981 		if (stacktrace_entries[i] == ULONG_MAX)
982 			return;
983 
984 		seq_printf(m, "%*c", 1 + spaces, ' ');
985 		sprint_symbol(str, stacktrace_entries[i]);
986 		seq_printf(m, "%s\n", str);
987 	}
988 }
989 
/*
 * Print one histogram entry: the key fields (formatted per their
 * modifier flags), then hitcount and each value field's sum.
 */
static void
hist_trigger_entry_print(struct seq_file *m,
			 struct hist_trigger_data *hist_data, void *key,
			 struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* comma-separate every key after the first (i starts at n_vals) */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* comm saved at insert time in the element's private data */
			char *comm = elt->private_data;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	/* remaining value fields start at index 1 (0 is hitcount) */
	for (i = 1; i < hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, "  %s: %10llx", field_name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, "  %s: %10llu", field_name,
				   tracing_map_read_sum(elt, i));
		}
	}

	seq_puts(m, "\n");
}
1081 
1082 static int print_entries(struct seq_file *m,
1083 			 struct hist_trigger_data *hist_data)
1084 {
1085 	struct tracing_map_sort_entry **sort_entries = NULL;
1086 	struct tracing_map *map = hist_data->map;
1087 	int i, n_entries;
1088 
1089 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1090 					     hist_data->n_sort_keys,
1091 					     &sort_entries);
1092 	if (n_entries < 0)
1093 		return n_entries;
1094 
1095 	for (i = 0; i < n_entries; i++)
1096 		hist_trigger_entry_print(m, hist_data,
1097 					 sort_entries[i]->key,
1098 					 sort_entries[i]->elt);
1099 
1100 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
1101 
1102 	return n_entries;
1103 }
1104 
/*
 * Print one trigger's histogram: a header with the trigger info, the
 * sorted entries, then hit/entry/drop totals.  n > 0 means this isn't
 * the first trigger shown, so separate it from the previous one.
 */
static void hist_trigger_show(struct seq_file *m,
			      struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int n_entries;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->ops->print(m, data->ops, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;
	n_entries = print_entries(m, hist_data);
	if (n_entries < 0)
		n_entries = 0;	/* show totals even if the sort failed */

	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
		   (u64)atomic64_read(&hist_data->map->hits),
		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
}
1127 
/*
 * seq_file show callback for the per-event "hist" file: print every
 * hist trigger attached to the event.  event_mutex protects the
 * trigger list against concurrent modification.
 */
static int hist_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct trace_event_file *event_file;
	int n = 0, ret = 0;

	mutex_lock(&event_mutex);

	/* the file may have been removed since it was opened */
	event_file = event_file_data(m->private);
	if (unlikely(!event_file)) {
		ret = -ENODEV;
		goto out_unlock;
	}

	list_for_each_entry_rcu(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
			hist_trigger_show(m, data, n++);
	}

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
1152 
/* open callback for the "hist" file; file is passed as seq private data */
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}
1157 
/* file_operations for the per-event tracefs "hist" file */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1164 
1165 static const char *get_hist_field_flags(struct hist_field *hist_field)
1166 {
1167 	const char *flags_str = NULL;
1168 
1169 	if (hist_field->flags & HIST_FIELD_FL_HEX)
1170 		flags_str = "hex";
1171 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
1172 		flags_str = "sym";
1173 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1174 		flags_str = "sym-offset";
1175 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1176 		flags_str = "execname";
1177 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1178 		flags_str = "syscall";
1179 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
1180 		flags_str = "log2";
1181 
1182 	return flags_str;
1183 }
1184 
1185 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1186 {
1187 	const char *field_name = hist_field_name(hist_field, 0);
1188 
1189 	seq_printf(m, "%s", field_name);
1190 	if (hist_field->flags) {
1191 		const char *flags_str = get_hist_field_flags(hist_field);
1192 
1193 		if (flags_str)
1194 			seq_printf(m, ".%s", flags_str);
1195 	}
1196 }
1197 
/*
 * Print the trigger-info line for a hist trigger, in the same
 * "hist:[name:]keys=...:vals=...:sort=...:size=... [if filter]" syntax
 * used to create it, followed by the [paused]/[active] state.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *key_field;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* key fields start at index n_vals, so no comma on the first */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, key_field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		/* hitcount is the implicit first value field */
		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, hist_data->fields[i]);
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;

		sort_key = &hist_data->sort_keys[i];

		if (i > 0)
			seq_puts(m, ",");

		if (sort_key->field_idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			unsigned int idx = sort_key->field_idx;

			/* a sort key must refer to a valid field slot */
			if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
				return -EINVAL;

			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
1275 
/*
 * Take a reference on a hist trigger.  The first reference to a named
 * trigger also registers it in the named trigger list, so that hist
 * triggers on other events can share its histogram.
 */
static int event_hist_trigger_init(struct event_trigger_ops *ops,
				   struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}
1288 
/*
 * Drop a reference on a hist trigger.  On the last reference the
 * trigger is removed from the named trigger list (if named), its
 * trigger data is freed and the histogram map is destroyed.
 */
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);
		/* hist_data was saved above; data is gone after this call */
		trigger_data_free(data);
		destroy_hist_data(hist_data);
	}
}
1305 
/* ops for a hist trigger that owns its own histogram data */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};
1312 
/*
 * init for a trigger that shares a named trigger's histogram: take a
 * local reference, register this instance under the shared name, and
 * take a reference on the primary named trigger as well.
 */
static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}
1324 
/*
 * free for a named-trigger sharer: release the reference held on the
 * primary named trigger, then tear down this instance when its own
 * refcount drops to zero.  The shared hist data is owned by the
 * primary trigger, so only trigger_data_free() is called here.
 */
static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}
1339 
/* ops for a hist trigger that shares a named trigger's histogram */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};
1346 
/*
 * All hist triggers initially get the unnamed ops; cmd and param are
 * unused.  hist_register_trigger() swaps in the named ops when the
 * trigger attaches to an existing named trigger.
 */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
1352 
/*
 * Clear a hist trigger's map.  A named trigger is paused across the
 * clear (pause_named_trigger()) and synchronize_sched() waits out any
 * in-flight trigger invocation before the entries are discarded, so no
 * updater races with tracing_map_clear().
 */
static void hist_clear(struct event_trigger_data *data)
{
	if (data->name)
		pause_named_trigger(data);

	synchronize_sched();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
1367 
1368 static bool compatible_field(struct ftrace_event_field *field,
1369 			     struct ftrace_event_field *test_field)
1370 {
1371 	if (field == test_field)
1372 		return true;
1373 	if (field == NULL || test_field == NULL)
1374 		return false;
1375 	if (strcmp(field->name, test_field->name) != 0)
1376 		return false;
1377 	if (strcmp(field->type, test_field->type) != 0)
1378 		return false;
1379 	if (field->size != test_field->size)
1380 		return false;
1381 	if (field->is_signed != test_field->is_signed)
1382 		return false;
1383 
1384 	return true;
1385 }
1386 
/*
 * Decide whether @data describes the same hist trigger as @data_test:
 * same named-trigger association, same counts of value/key/sort fields,
 * field-by-field compatibility and identical sort keys.  With
 * @ignore_filter set, filter strings are not compared (used when
 * matching against an existing named trigger, whose sharers may have
 * different filters).
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	/* a candidate must be the named trigger itself or one of its sharers */
	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	/* an unnamed trigger never matches a named one */
	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	/* either both have a filter or neither does */
	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		   (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	/* both filters non-NULL here (checked above); compare contents */
	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	return true;
}
1445 
/*
 * Register a hist trigger on @file.  Returns the number of triggers
 * registered (1) on success, 0 if the command only modified an
 * existing trigger (pause/cont/clear), or a negative errno.
 *
 * A named trigger must be compatible with any existing trigger of the
 * same name; a matching existing trigger on the same event is either
 * paused, continued, cleared, or rejected with -EEXIST.
 */
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			/* same name must mean same histogram definition */
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* first use of this name: nothing existing to modify, register new */
	if (hist_data->attrs->name && !named_data)
		goto new;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			/* matching trigger exists: apply modifiers, if any */
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else
				ret = -EEXIST;
			goto out;
		}
	}
 new:
	/* cont and clear only make sense on an existing trigger */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		/* share the named trigger's histogram instead of our own */
		destroy_hist_data(data->private_data);
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);

	/* roll back the registration if the event can't be enabled */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
1518 
/*
 * Remove the hist trigger on @file matching @data, if any, and drop
 * its reference.  Silently does nothing when no trigger matches.
 */
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/*
	 * test is only valid here when the loop broke out early;
	 * unregistered guards against the loop having run to completion.
	 */
	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);
}
1545 
/*
 * Remove and free every hist trigger on @file (invoked when all
 * triggers on the event are being torn down).
 */
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
1560 
/*
 * Parse and apply a "hist" trigger command written to an event's
 * trigger file.  @param is the command body after "hist:"; a leading
 * '!' in @glob requests removal.  Returns 0 on success or a negative
 * errno.
 *
 * Ownership: on any failure (and on removal) both trigger_data and
 * hist_data are freed here; on successful registration ownership
 * passes to the trigger list.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	char *trigger;
	int ret = 0;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (k:v [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	attrs = parse_hist_trigger_attrs(trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	/* size=... attribute overrides the default map size */
	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* hist_data takes ownership of attrs from here on */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out_free;

	trigger_data->count = -1;	/* -1 == unlimited invocations */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	/* '!hist:...' means remove the matching trigger */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		/* pause/cont/clear legitimately modify without registering */
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;
 out_free:
	/* clear any filter that set_filter() may have installed */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}
1648 
/* The "hist" trigger command: hist:keys=...[:vals=...][:sort=...] [if filter] */
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1660 
1661 __init int register_trigger_hist_cmd(void)
1662 {
1663 	int ret;
1664 
1665 	ret = register_event_command(&trigger_hist_cmd);
1666 	WARN_ON(ret < 0);
1667 
1668 	return ret;
1669 }
1670 
1671 static void
1672 hist_enable_trigger(struct event_trigger_data *data, void *rec)
1673 {
1674 	struct enable_trigger_data *enable_data = data->private_data;
1675 	struct event_trigger_data *test;
1676 
1677 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
1678 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1679 			if (enable_data->enable)
1680 				test->paused = false;
1681 			else
1682 				test->paused = true;
1683 		}
1684 	}
1685 }
1686 
1687 static void
1688 hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
1689 {
1690 	if (!data->count)
1691 		return;
1692 
1693 	if (data->count != -1)
1694 		(data->count)--;
1695 
1696 	hist_enable_trigger(data, rec);
1697 }
1698 
/* enable_hist without a count parameter */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1705 
/* enable_hist with a count parameter */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1712 
/* disable_hist without a count parameter (same func; enable flag differs) */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1719 
/* disable_hist with a count parameter */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1726 
1727 static struct event_trigger_ops *
1728 hist_enable_get_trigger_ops(char *cmd, char *param)
1729 {
1730 	struct event_trigger_ops *ops;
1731 	bool enable;
1732 
1733 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
1734 
1735 	if (enable)
1736 		ops = param ? &hist_enable_count_trigger_ops :
1737 			&hist_enable_trigger_ops;
1738 	else
1739 		ops = param ? &hist_disable_count_trigger_ops :
1740 			&hist_disable_trigger_ops;
1741 
1742 	return ops;
1743 }
1744 
/*
 * Remove and free every enable_hist/disable_hist trigger on @file
 * (invoked when all triggers on the event are being torn down).
 */
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
1759 
/* The enable_hist trigger command: unpauses hist triggers on another event */
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1770 
/* The disable_hist trigger command: pauses hist triggers on another event */
static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1781 
/* Unregister both enable_hist and disable_hist commands (boot rollback) */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
1787 
/*
 * Register enable_hist and disable_hist at boot.  If the second
 * registration fails, the first is rolled back so neither command is
 * left half-registered.
 */
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}
1801