xref: /openbmc/linux/tools/perf/util/parse-events.c (revision 4f3db074)
1 #include <linux/hw_breakpoint.h>
2 #include "util.h"
3 #include "../perf.h"
4 #include "evlist.h"
5 #include "evsel.h"
6 #include "parse-options.h"
7 #include "parse-events.h"
8 #include "exec_cmd.h"
9 #include "string.h"
10 #include "symbol.h"
11 #include "cache.h"
12 #include "header.h"
13 #include "debug.h"
14 #include <api/fs/debugfs.h>
15 #include "parse-events-bison.h"
16 #define YY_EXTRA_TYPE int
17 #include "parse-events-flex.h"
18 #include "pmu.h"
19 #include "thread_map.h"
20 #include "asm/bug.h"
21 
22 #define MAX_NAME_LEN 100
23 
24 #ifdef PARSER_DEBUG
25 extern int parse_events_debug;
26 #endif
27 int parse_events_parse(void *data, void *scanner);
28 int parse_events_term__num(struct parse_events_term **term,
29 			   int type_term, char *config, u64 num,
30 			   YYLTYPE *loc_term, YYLTYPE *loc_val);
31 int parse_events_term__str(struct parse_events_term **term,
32 			   int type_term, char *config, char *str,
33 			   YYLTYPE *loc_term, YYLTYPE *loc_val);
34 
/* Sorted-by-symbol table of kernel pmu event symbols read from sysfs. */
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
 * The variable indicates the number of supported pmu event symbols.
 * 0 means not initialized and ready to init
 * -1 means failed to init, don't try anymore
 * >0 is the number of supported pmu event symbols
 */
static int perf_pmu_events_list_num;
43 
/*
 * Generic hardware event names, indexed by PERF_COUNT_HW_*.
 * Each entry holds the canonical symbol plus an optional short alias
 * ("" means no alias) accepted by the event parser.
 */
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};
86 
/*
 * Generic software event names, indexed by PERF_COUNT_SW_*.
 * Same layout as event_symbols_hw: canonical symbol plus optional alias.
 */
struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
};
129 
/* Extract one bit field from a raw event config value. */
#define __PERF_EVENT_FIELD(config, name) \
	((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)

#define PERF_EVENT_RAW(config)		__PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config)	__PERF_EVENT_FIELD(config, CONFIG)
#define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)
137 
/*
 * Iterate over the subsystem directories below tracing_events_path,
 * skipping the "." and ".." entries.  readdir_r() fills 'sys_dirent'
 * in place; iteration ends when 'sys_next' becomes NULL.
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (sys_dirent.d_type == DT_DIR &&				       \
	   (strcmp(sys_dirent.d_name, ".")) &&				       \
	   (strcmp(sys_dirent.d_name, "..")))
143 
144 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
145 {
146 	char evt_path[MAXPATHLEN];
147 	int fd;
148 
149 	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
150 			sys_dir->d_name, evt_dir->d_name);
151 	fd = open(evt_path, O_RDONLY);
152 	if (fd < 0)
153 		return -EINVAL;
154 	close(fd);
155 
156 	return 0;
157 }
158 
/*
 * Iterate over the event directories of one subsystem, skipping "." and
 * ".." and any directory that has no readable "id" file (i.e. is not a
 * real tracepoint).  readdir_r() fills 'evt_dirent' in place.
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
	if (evt_dirent.d_type == DT_DIR &&				       \
	   (strcmp(evt_dirent.d_name, ".")) &&				       \
	   (strcmp(evt_dirent.d_name, "..")) &&				       \
	   (!tp_event_has_id(&sys_dirent, &evt_dirent)))
165 
166 #define MAX_EVENT_LENGTH 512
167 
168 
169 struct tracepoint_path *tracepoint_id_to_path(u64 config)
170 {
171 	struct tracepoint_path *path = NULL;
172 	DIR *sys_dir, *evt_dir;
173 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
174 	char id_buf[24];
175 	int fd;
176 	u64 id;
177 	char evt_path[MAXPATHLEN];
178 	char dir_path[MAXPATHLEN];
179 
180 	sys_dir = opendir(tracing_events_path);
181 	if (!sys_dir)
182 		return NULL;
183 
184 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
185 
186 		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
187 			 sys_dirent.d_name);
188 		evt_dir = opendir(dir_path);
189 		if (!evt_dir)
190 			continue;
191 
192 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
193 
194 			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
195 				 evt_dirent.d_name);
196 			fd = open(evt_path, O_RDONLY);
197 			if (fd < 0)
198 				continue;
199 			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
200 				close(fd);
201 				continue;
202 			}
203 			close(fd);
204 			id = atoll(id_buf);
205 			if (id == config) {
206 				closedir(evt_dir);
207 				closedir(sys_dir);
208 				path = zalloc(sizeof(*path));
209 				path->system = malloc(MAX_EVENT_LENGTH);
210 				if (!path->system) {
211 					free(path);
212 					return NULL;
213 				}
214 				path->name = malloc(MAX_EVENT_LENGTH);
215 				if (!path->name) {
216 					zfree(&path->system);
217 					free(path);
218 					return NULL;
219 				}
220 				strncpy(path->system, sys_dirent.d_name,
221 					MAX_EVENT_LENGTH);
222 				strncpy(path->name, evt_dirent.d_name,
223 					MAX_EVENT_LENGTH);
224 				return path;
225 			}
226 		}
227 		closedir(evt_dir);
228 	}
229 
230 	closedir(sys_dir);
231 	return NULL;
232 }
233 
234 struct tracepoint_path *tracepoint_name_to_path(const char *name)
235 {
236 	struct tracepoint_path *path = zalloc(sizeof(*path));
237 	char *str = strchr(name, ':');
238 
239 	if (path == NULL || str == NULL) {
240 		free(path);
241 		return NULL;
242 	}
243 
244 	path->system = strndup(name, str - name);
245 	path->name = strdup(str+1);
246 
247 	if (path->system == NULL || path->name == NULL) {
248 		zfree(&path->system);
249 		zfree(&path->name);
250 		free(path);
251 		path = NULL;
252 	}
253 
254 	return path;
255 }
256 
/*
 * Human-readable name of a perf event type, for use in messages.
 * Any type without a dedicated name maps to "unknown".
 */
const char *event_type(int type)
{
	if (type == PERF_TYPE_HARDWARE)
		return "hardware";
	if (type == PERF_TYPE_SOFTWARE)
		return "software";
	if (type == PERF_TYPE_TRACEPOINT)
		return "tracepoint";
	if (type == PERF_TYPE_HW_CACHE)
		return "hardware-cache";

	return "unknown";
}
278 
279 
280 
281 static struct perf_evsel *
282 __add_event(struct list_head *list, int *idx,
283 	    struct perf_event_attr *attr,
284 	    char *name, struct cpu_map *cpus)
285 {
286 	struct perf_evsel *evsel;
287 
288 	event_attr_init(attr);
289 
290 	evsel = perf_evsel__new_idx(attr, (*idx)++);
291 	if (!evsel)
292 		return NULL;
293 
294 	evsel->cpus = cpus;
295 	if (name)
296 		evsel->name = strdup(name);
297 	list_add_tail(&evsel->node, list);
298 	return evsel;
299 }
300 
301 static int add_event(struct list_head *list, int *idx,
302 		     struct perf_event_attr *attr, char *name)
303 {
304 	return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
305 }
306 
307 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
308 {
309 	int i, j;
310 	int n, longest = -1;
311 
312 	for (i = 0; i < size; i++) {
313 		for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
314 			n = strlen(names[i][j]);
315 			if (n > longest && !strncasecmp(str, names[i][j], n))
316 				longest = n;
317 		}
318 		if (longest > 0)
319 			return i;
320 	}
321 
322 	return -1;
323 }
324 
325 int parse_events_add_cache(struct list_head *list, int *idx,
326 			   char *type, char *op_result1, char *op_result2)
327 {
328 	struct perf_event_attr attr;
329 	char name[MAX_NAME_LEN];
330 	int cache_type = -1, cache_op = -1, cache_result = -1;
331 	char *op_result[2] = { op_result1, op_result2 };
332 	int i, n;
333 
334 	/*
335 	 * No fallback - if we cannot get a clear cache type
336 	 * then bail out:
337 	 */
338 	cache_type = parse_aliases(type, perf_evsel__hw_cache,
339 				   PERF_COUNT_HW_CACHE_MAX);
340 	if (cache_type == -1)
341 		return -EINVAL;
342 
343 	n = snprintf(name, MAX_NAME_LEN, "%s", type);
344 
345 	for (i = 0; (i < 2) && (op_result[i]); i++) {
346 		char *str = op_result[i];
347 
348 		n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
349 
350 		if (cache_op == -1) {
351 			cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
352 						 PERF_COUNT_HW_CACHE_OP_MAX);
353 			if (cache_op >= 0) {
354 				if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
355 					return -EINVAL;
356 				continue;
357 			}
358 		}
359 
360 		if (cache_result == -1) {
361 			cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
362 						     PERF_COUNT_HW_CACHE_RESULT_MAX);
363 			if (cache_result >= 0)
364 				continue;
365 		}
366 	}
367 
368 	/*
369 	 * Fall back to reads:
370 	 */
371 	if (cache_op == -1)
372 		cache_op = PERF_COUNT_HW_CACHE_OP_READ;
373 
374 	/*
375 	 * Fall back to accesses:
376 	 */
377 	if (cache_result == -1)
378 		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
379 
380 	memset(&attr, 0, sizeof(attr));
381 	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
382 	attr.type = PERF_TYPE_HW_CACHE;
383 	return add_event(list, idx, &attr, name);
384 }
385 
386 static int add_tracepoint(struct list_head *list, int *idx,
387 			  char *sys_name, char *evt_name)
388 {
389 	struct perf_evsel *evsel;
390 
391 	evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
392 	if (!evsel)
393 		return -ENOMEM;
394 
395 	list_add_tail(&evsel->node, list);
396 
397 	return 0;
398 }
399 
400 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
401 				      char *sys_name, char *evt_name)
402 {
403 	char evt_path[MAXPATHLEN];
404 	struct dirent *evt_ent;
405 	DIR *evt_dir;
406 	int ret = 0;
407 
408 	snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
409 	evt_dir = opendir(evt_path);
410 	if (!evt_dir) {
411 		perror("Can't open event dir");
412 		return -1;
413 	}
414 
415 	while (!ret && (evt_ent = readdir(evt_dir))) {
416 		if (!strcmp(evt_ent->d_name, ".")
417 		    || !strcmp(evt_ent->d_name, "..")
418 		    || !strcmp(evt_ent->d_name, "enable")
419 		    || !strcmp(evt_ent->d_name, "filter"))
420 			continue;
421 
422 		if (!strglobmatch(evt_ent->d_name, evt_name))
423 			continue;
424 
425 		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
426 	}
427 
428 	closedir(evt_dir);
429 	return ret;
430 }
431 
/* Dispatch on glob characters: one tracepoint vs. all matching ones. */
static int add_tracepoint_event(struct list_head *list, int *idx,
				char *sys_name, char *evt_name)
{
	if (strpbrk(evt_name, "*?") != NULL)
		return add_tracepoint_multi_event(list, idx, sys_name, evt_name);

	return add_tracepoint(list, idx, sys_name, evt_name);
}
439 
440 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
441 				    char *sys_name, char *evt_name)
442 {
443 	struct dirent *events_ent;
444 	DIR *events_dir;
445 	int ret = 0;
446 
447 	events_dir = opendir(tracing_events_path);
448 	if (!events_dir) {
449 		perror("Can't open event dir");
450 		return -1;
451 	}
452 
453 	while (!ret && (events_ent = readdir(events_dir))) {
454 		if (!strcmp(events_ent->d_name, ".")
455 		    || !strcmp(events_ent->d_name, "..")
456 		    || !strcmp(events_ent->d_name, "enable")
457 		    || !strcmp(events_ent->d_name, "header_event")
458 		    || !strcmp(events_ent->d_name, "header_page"))
459 			continue;
460 
461 		if (!strglobmatch(events_ent->d_name, sys_name))
462 			continue;
463 
464 		ret = add_tracepoint_event(list, idx, events_ent->d_name,
465 					   evt_name);
466 	}
467 
468 	closedir(events_dir);
469 	return ret;
470 }
471 
/* Parser entry point for "sys:event" tracepoints (globs allowed in both). */
int parse_events_add_tracepoint(struct list_head *list, int *idx,
				char *sys, char *event)
{
	return strpbrk(sys, "*?") ?
		add_tracepoint_multi_sys(list, idx, sys, event) :
		add_tracepoint_event(list, idx, sys, event);
}
480 
/*
 * Decode a breakpoint type string of up to three characters out of
 * {'r','w','x'} into attr->bp_type.  Each access type may appear only
 * once; an empty/NULL string defaults to read+write.
 *
 * Returns 0 on success, -EINVAL on a bad or repeated character.
 */
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		int bit;

		if (!type || !type[i])
			break;

		switch (type[i]) {
		case 'r':
			bit = HW_BREAKPOINT_R;
			break;
		case 'w':
			bit = HW_BREAKPOINT_W;
			break;
		case 'x':
			bit = HW_BREAKPOINT_X;
			break;
		default:
			return -EINVAL;
		}

		/* Each access type may be requested at most once. */
		if (attr->bp_type & bit)
			return -EINVAL;
		attr->bp_type |= bit;
	}

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}
520 
521 int parse_events_add_breakpoint(struct list_head *list, int *idx,
522 				void *ptr, char *type, u64 len)
523 {
524 	struct perf_event_attr attr;
525 
526 	memset(&attr, 0, sizeof(attr));
527 	attr.bp_addr = (unsigned long) ptr;
528 
529 	if (parse_breakpoint_type(type, &attr))
530 		return -EINVAL;
531 
532 	/* Provide some defaults if len is not specified */
533 	if (!len) {
534 		if (attr.bp_type == HW_BREAKPOINT_X)
535 			len = sizeof(long);
536 		else
537 			len = HW_BREAKPOINT_LEN_4;
538 	}
539 
540 	attr.bp_len = len;
541 
542 	attr.type = PERF_TYPE_BREAKPOINT;
543 	attr.sample_period = 1;
544 
545 	return add_event(list, idx, &attr, NULL);
546 }
547 
548 static int check_type_val(struct parse_events_term *term,
549 			  struct parse_events_error *err,
550 			  int type)
551 {
552 	if (type == term->type_val)
553 		return 0;
554 
555 	if (err) {
556 		err->idx = term->err_val;
557 		if (type == PARSE_EVENTS__TERM_TYPE_NUM)
558 			err->str = strdup("expected numeric value");
559 		else
560 			err->str = strdup("expected string value");
561 	}
562 	return -EINVAL;
563 }
564 
565 static int config_term(struct perf_event_attr *attr,
566 		       struct parse_events_term *term,
567 		       struct parse_events_error *err)
568 {
569 #define CHECK_TYPE_VAL(type)						   \
570 do {									   \
571 	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
572 		return -EINVAL;						   \
573 } while (0)
574 
575 	switch (term->type_term) {
576 	case PARSE_EVENTS__TERM_TYPE_USER:
577 		/*
578 		 * Always succeed for sysfs terms, as we dont know
579 		 * at this point what type they need to have.
580 		 */
581 		return 0;
582 	case PARSE_EVENTS__TERM_TYPE_CONFIG:
583 		CHECK_TYPE_VAL(NUM);
584 		attr->config = term->val.num;
585 		break;
586 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
587 		CHECK_TYPE_VAL(NUM);
588 		attr->config1 = term->val.num;
589 		break;
590 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
591 		CHECK_TYPE_VAL(NUM);
592 		attr->config2 = term->val.num;
593 		break;
594 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
595 		CHECK_TYPE_VAL(NUM);
596 		attr->sample_period = term->val.num;
597 		break;
598 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
599 		/*
600 		 * TODO uncomment when the field is available
601 		 * attr->branch_sample_type = term->val.num;
602 		 */
603 		break;
604 	case PARSE_EVENTS__TERM_TYPE_NAME:
605 		CHECK_TYPE_VAL(STR);
606 		break;
607 	default:
608 		return -EINVAL;
609 	}
610 
611 	return 0;
612 #undef CHECK_TYPE_VAL
613 }
614 
615 static int config_attr(struct perf_event_attr *attr,
616 		       struct list_head *head,
617 		       struct parse_events_error *err)
618 {
619 	struct parse_events_term *term;
620 
621 	list_for_each_entry(term, head, list)
622 		if (config_term(attr, term, err))
623 			return -EINVAL;
624 
625 	return 0;
626 }
627 
628 int parse_events_add_numeric(struct parse_events_evlist *data,
629 			     struct list_head *list,
630 			     u32 type, u64 config,
631 			     struct list_head *head_config)
632 {
633 	struct perf_event_attr attr;
634 
635 	memset(&attr, 0, sizeof(attr));
636 	attr.type = type;
637 	attr.config = config;
638 
639 	if (head_config &&
640 	    config_attr(&attr, head_config, data->error))
641 		return -EINVAL;
642 
643 	return add_event(list, &data->idx, &attr, NULL);
644 }
645 
646 static int parse_events__is_name_term(struct parse_events_term *term)
647 {
648 	return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
649 }
650 
651 static char *pmu_event_name(struct list_head *head_terms)
652 {
653 	struct parse_events_term *term;
654 
655 	list_for_each_entry(term, head_terms, list)
656 		if (parse_events__is_name_term(term))
657 			return term->val.str;
658 
659 	return NULL;
660 }
661 
/*
 * Add an event for the PMU named 'name', optionally configured by the
 * terms in 'head_config'.  Returns 0 on success, -EINVAL for an
 * unknown PMU or bad terms, -ENOMEM when the evsel cannot be created.
 */
int parse_events_add_pmu(struct parse_events_evlist *data,
			 struct list_head *list, char *name,
			 struct list_head *head_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu;
	struct perf_evsel *evsel;

	pmu = perf_pmu__find(name);
	if (!pmu)
		return -EINVAL;

	/* Start from the PMU's default attr when it provides one. */
	if (pmu->default_config) {
		memcpy(&attr, pmu->default_config,
		       sizeof(struct perf_event_attr));
	} else {
		memset(&attr, 0, sizeof(attr));
	}

	/* No terms at all: plain PMU event with the default config. */
	if (!head_config) {
		attr.type = pmu->type;
		evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus);
		return evsel ? 0 : -ENOMEM;
	}

	if (perf_pmu__check_alias(pmu, head_config, &info))
		return -EINVAL;

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	if (config_attr(&attr, head_config, data->error))
		return -EINVAL;

	if (perf_pmu__config(pmu, &attr, head_config, data->error))
		return -EINVAL;

	evsel = __add_event(list, &data->idx, &attr,
			    pmu_event_name(head_config), pmu->cpus);
	if (evsel) {
		/* Propagate attributes picked up from the alias lookup. */
		evsel->unit = info.unit;
		evsel->scale = info.scale;
		evsel->per_pkg = info.per_pkg;
		evsel->snapshot = info.snapshot;
	}

	return evsel ? 0 : -ENOMEM;
}
712 
/* Apply a group-level modifier string to every event in the group. */
int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}
718 
719 void parse_events__set_leader(char *name, struct list_head *list)
720 {
721 	struct perf_evsel *leader;
722 
723 	__perf_evlist__set_leader(list);
724 	leader = list_entry(list->next, struct perf_evsel, node);
725 	leader->group_name = name ? strdup(name) : NULL;
726 }
727 
/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for a single event definition: move its events onto the
	 * 'all events' list, then release the now-empty container so the
	 * next event definition starts from scratch.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}
740 
/* Decoded event modifier string (the "ukhp..." suffix of an event). */
struct event_modifier {
	int eu;		/* exclude user */
	int ek;		/* exclude kernel */
	int eh;		/* exclude hypervisor */
	int eH;		/* exclude host */
	int eG;		/* exclude guest */
	int eI;		/* exclude idle */
	int precise;	/* precise_ip level, 0-3 */
	int exclude_GH;	/* guest/host exclusion explicitly requested */
	int sample_read;	/* 'S' modifier seen */
	int pinned;	/* 'D' modifier seen */
};
753 
754 static int get_event_modifier(struct event_modifier *mod, char *str,
755 			       struct perf_evsel *evsel)
756 {
757 	int eu = evsel ? evsel->attr.exclude_user : 0;
758 	int ek = evsel ? evsel->attr.exclude_kernel : 0;
759 	int eh = evsel ? evsel->attr.exclude_hv : 0;
760 	int eH = evsel ? evsel->attr.exclude_host : 0;
761 	int eG = evsel ? evsel->attr.exclude_guest : 0;
762 	int eI = evsel ? evsel->attr.exclude_idle : 0;
763 	int precise = evsel ? evsel->attr.precise_ip : 0;
764 	int sample_read = 0;
765 	int pinned = evsel ? evsel->attr.pinned : 0;
766 
767 	int exclude = eu | ek | eh;
768 	int exclude_GH = evsel ? evsel->exclude_GH : 0;
769 
770 	memset(mod, 0, sizeof(*mod));
771 
772 	while (*str) {
773 		if (*str == 'u') {
774 			if (!exclude)
775 				exclude = eu = ek = eh = 1;
776 			eu = 0;
777 		} else if (*str == 'k') {
778 			if (!exclude)
779 				exclude = eu = ek = eh = 1;
780 			ek = 0;
781 		} else if (*str == 'h') {
782 			if (!exclude)
783 				exclude = eu = ek = eh = 1;
784 			eh = 0;
785 		} else if (*str == 'G') {
786 			if (!exclude_GH)
787 				exclude_GH = eG = eH = 1;
788 			eG = 0;
789 		} else if (*str == 'H') {
790 			if (!exclude_GH)
791 				exclude_GH = eG = eH = 1;
792 			eH = 0;
793 		} else if (*str == 'I') {
794 			eI = 1;
795 		} else if (*str == 'p') {
796 			precise++;
797 			/* use of precise requires exclude_guest */
798 			if (!exclude_GH)
799 				eG = 1;
800 		} else if (*str == 'S') {
801 			sample_read = 1;
802 		} else if (*str == 'D') {
803 			pinned = 1;
804 		} else
805 			break;
806 
807 		++str;
808 	}
809 
810 	/*
811 	 * precise ip:
812 	 *
813 	 *  0 - SAMPLE_IP can have arbitrary skid
814 	 *  1 - SAMPLE_IP must have constant skid
815 	 *  2 - SAMPLE_IP requested to have 0 skid
816 	 *  3 - SAMPLE_IP must have 0 skid
817 	 *
818 	 *  See also PERF_RECORD_MISC_EXACT_IP
819 	 */
820 	if (precise > 3)
821 		return -EINVAL;
822 
823 	mod->eu = eu;
824 	mod->ek = ek;
825 	mod->eh = eh;
826 	mod->eH = eH;
827 	mod->eG = eG;
828 	mod->eI = eI;
829 	mod->precise = precise;
830 	mod->exclude_GH = exclude_GH;
831 	mod->sample_read = sample_read;
832 	mod->pinned = pinned;
833 
834 	return 0;
835 }
836 
/*
 * Basic modifier sanity check to validate it contains only one
 * instance of any modifier (apart from 'p') present.
 */
static int check_modifier(char *str)
{
	char *c;

	/* Longest legal string: every flag once plus up to three 'p's. */
	if (strlen(str) > strlen("ukhGHpppSDI"))
		return -1;

	for (c = str; *c; c++) {
		if (*c == 'p')
			continue;	/* 'p' may repeat */
		if (strchr(c + 1, *c))
			return -1;
	}

	return 0;
}
857 
858 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
859 {
860 	struct perf_evsel *evsel;
861 	struct event_modifier mod;
862 
863 	if (str == NULL)
864 		return 0;
865 
866 	if (check_modifier(str))
867 		return -EINVAL;
868 
869 	if (!add && get_event_modifier(&mod, str, NULL))
870 		return -EINVAL;
871 
872 	__evlist__for_each(list, evsel) {
873 		if (add && get_event_modifier(&mod, str, evsel))
874 			return -EINVAL;
875 
876 		evsel->attr.exclude_user   = mod.eu;
877 		evsel->attr.exclude_kernel = mod.ek;
878 		evsel->attr.exclude_hv     = mod.eh;
879 		evsel->attr.precise_ip     = mod.precise;
880 		evsel->attr.exclude_host   = mod.eH;
881 		evsel->attr.exclude_guest  = mod.eG;
882 		evsel->attr.exclude_idle   = mod.eI;
883 		evsel->exclude_GH          = mod.exclude_GH;
884 		evsel->sample_read         = mod.sample_read;
885 
886 		if (perf_evsel__is_group_leader(evsel))
887 			evsel->attr.pinned = mod.pinned;
888 	}
889 
890 	return 0;
891 }
892 
893 int parse_events_name(struct list_head *list, char *name)
894 {
895 	struct perf_evsel *evsel;
896 
897 	__evlist__for_each(list, evsel) {
898 		if (!evsel->name)
899 			evsel->name = strdup(name);
900 	}
901 
902 	return 0;
903 }
904 
905 static int
906 comp_pmu(const void *p1, const void *p2)
907 {
908 	struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
909 	struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
910 
911 	return strcmp(pmu1->symbol, pmu2->symbol);
912 }
913 
914 static void perf_pmu__parse_cleanup(void)
915 {
916 	if (perf_pmu_events_list_num > 0) {
917 		struct perf_pmu_event_symbol *p;
918 		int i;
919 
920 		for (i = 0; i < perf_pmu_events_list_num; i++) {
921 			p = perf_pmu_events_list + i;
922 			free(p->symbol);
923 		}
924 		free(perf_pmu_events_list);
925 		perf_pmu_events_list = NULL;
926 		perf_pmu_events_list_num = 0;
927 	}
928 }
929 
/*
 * Store an already-allocated symbol string and its type into the list
 * entry 'p'.  When the allocation that produced 'str' failed (NULL),
 * jump to the enclosing function's 'err' label.
 */
#define SET_SYMBOL(str, stype)		\
do {					\
	p->symbol = str;		\
	if (!p->symbol)			\
		goto err;		\
	p->type = stype;		\
} while (0)
937 
938 /*
939  * Read the pmu events list from sysfs
940  * Save it into perf_pmu_events_list
941  */
942 static void perf_pmu__parse_init(void)
943 {
944 
945 	struct perf_pmu *pmu = NULL;
946 	struct perf_pmu_alias *alias;
947 	int len = 0;
948 
949 	pmu = perf_pmu__find("cpu");
950 	if ((pmu == NULL) || list_empty(&pmu->aliases)) {
951 		perf_pmu_events_list_num = -1;
952 		return;
953 	}
954 	list_for_each_entry(alias, &pmu->aliases, list) {
955 		if (strchr(alias->name, '-'))
956 			len++;
957 		len++;
958 	}
959 	perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
960 	if (!perf_pmu_events_list)
961 		return;
962 	perf_pmu_events_list_num = len;
963 
964 	len = 0;
965 	list_for_each_entry(alias, &pmu->aliases, list) {
966 		struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
967 		char *tmp = strchr(alias->name, '-');
968 
969 		if (tmp != NULL) {
970 			SET_SYMBOL(strndup(alias->name, tmp - alias->name),
971 					PMU_EVENT_SYMBOL_PREFIX);
972 			p++;
973 			SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
974 			len += 2;
975 		} else {
976 			SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
977 			len++;
978 		}
979 	}
980 	qsort(perf_pmu_events_list, len,
981 		sizeof(struct perf_pmu_event_symbol), comp_pmu);
982 
983 	return;
984 err:
985 	perf_pmu__parse_cleanup();
986 }
987 
988 enum perf_pmu_event_symbol_type
989 perf_pmu__parse_check(const char *name)
990 {
991 	struct perf_pmu_event_symbol p, *r;
992 
993 	/* scan kernel pmu events from sysfs if needed */
994 	if (perf_pmu_events_list_num == 0)
995 		perf_pmu__parse_init();
996 	/*
997 	 * name "cpu" could be prefix of cpu-cycles or cpu// events.
998 	 * cpu-cycles has been handled by hardcode.
999 	 * So it must be cpu// events, not kernel pmu event.
1000 	 */
1001 	if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
1002 		return PMU_EVENT_SYMBOL_ERR;
1003 
1004 	p.symbol = strdup(name);
1005 	r = bsearch(&p, perf_pmu_events_list,
1006 			(size_t) perf_pmu_events_list_num,
1007 			sizeof(struct perf_pmu_event_symbol), comp_pmu);
1008 	free(p.symbol);
1009 	return r ? r->type : PMU_EVENT_SYMBOL_ERR;
1010 }
1011 
/*
 * Run the flex/bison event parser over 'str'.  'start_token' selects
 * the grammar entry point (PE_START_EVENTS or PE_START_TERMS) and is
 * handed to the scanner as extra state; 'data' is the parser's output
 * structure.  Returns the parser result, or the lexer-init error.
 */
static int parse_events__scanner(const char *str, void *data, int start_token)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(start_token, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
#endif
	ret = parse_events_parse(data, scanner);

	/* Tear down in reverse order of setup. */
	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}
1034 
1035 /*
1036  * parse event config string, return a list of event terms.
1037  */
1038 int parse_events_terms(struct list_head *terms, const char *str)
1039 {
1040 	struct parse_events_terms data = {
1041 		.terms = NULL,
1042 	};
1043 	int ret;
1044 
1045 	ret = parse_events__scanner(str, &data, PE_START_TERMS);
1046 	if (!ret) {
1047 		list_splice(data.terms, terms);
1048 		zfree(&data.terms);
1049 		return 0;
1050 	}
1051 
1052 	if (data.terms)
1053 		parse_events__free_terms(data.terms);
1054 	return ret;
1055 }
1056 
1057 int parse_events(struct perf_evlist *evlist, const char *str,
1058 		 struct parse_events_error *err)
1059 {
1060 	struct parse_events_evlist data = {
1061 		.list  = LIST_HEAD_INIT(data.list),
1062 		.idx   = evlist->nr_entries,
1063 		.error = err,
1064 	};
1065 	int ret;
1066 
1067 	ret = parse_events__scanner(str, &data, PE_START_EVENTS);
1068 	perf_pmu__parse_cleanup();
1069 	if (!ret) {
1070 		int entries = data.idx - evlist->nr_entries;
1071 		perf_evlist__splice_list_tail(evlist, &data.list, entries);
1072 		evlist->nr_groups += data.nr_groups;
1073 		return 0;
1074 	}
1075 
1076 	/*
1077 	 * There are 2 users - builtin-record and builtin-test objects.
1078 	 * Both call perf_evlist__delete in case of error, so we dont
1079 	 * need to bother.
1080 	 */
1081 	return ret;
1082 }
1083 
#define MAX_WIDTH 1000

/* Current terminal width in columns, clamped to MAX_WIDTH. */
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	if (ws.ws_col > MAX_WIDTH)
		return MAX_WIDTH;
	return ws.ws_col;
}
1092 
1093 static void parse_events_print_error(struct parse_events_error *err,
1094 				     const char *event)
1095 {
1096 	const char *str = "invalid or unsupported event: ";
1097 	char _buf[MAX_WIDTH];
1098 	char *buf = (char *) event;
1099 	int idx = 0;
1100 
1101 	if (err->str) {
1102 		/* -2 for extra '' in the final fprintf */
1103 		int width       = get_term_width() - 2;
1104 		int len_event   = strlen(event);
1105 		int len_str, max_len, cut = 0;
1106 
1107 		/*
1108 		 * Maximum error index indent, we will cut
1109 		 * the event string if it's bigger.
1110 		 */
1111 		int max_err_idx = 10;
1112 
1113 		/*
1114 		 * Let's be specific with the message when
1115 		 * we have the precise error.
1116 		 */
1117 		str     = "event syntax error: ";
1118 		len_str = strlen(str);
1119 		max_len = width - len_str;
1120 
1121 		buf = _buf;
1122 
1123 		/* We're cutting from the beggining. */
1124 		if (err->idx > max_err_idx)
1125 			cut = err->idx - max_err_idx;
1126 
1127 		strncpy(buf, event + cut, max_len);
1128 
1129 		/* Mark cut parts with '..' on both sides. */
1130 		if (cut)
1131 			buf[0] = buf[1] = '.';
1132 
1133 		if ((len_event - cut) > max_len) {
1134 			buf[max_len - 1] = buf[max_len - 2] = '.';
1135 			buf[max_len] = 0;
1136 		}
1137 
1138 		idx = len_str + err->idx - cut;
1139 	}
1140 
1141 	fprintf(stderr, "%s'%s'\n", str, buf);
1142 	if (idx) {
1143 		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str);
1144 		if (err->help)
1145 			fprintf(stderr, "\n%s\n", err->help);
1146 		free(err->str);
1147 		free(err->help);
1148 	}
1149 
1150 	fprintf(stderr, "Run 'perf list' for a list of valid events\n");
1151 }
1152 
1153 #undef MAX_WIDTH
1154 
1155 int parse_events_option(const struct option *opt, const char *str,
1156 			int unset __maybe_unused)
1157 {
1158 	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
1159 	struct parse_events_error err = { .idx = 0, };
1160 	int ret = parse_events(evlist, str, &err);
1161 
1162 	if (ret)
1163 		parse_events_print_error(&err, str);
1164 
1165 	return ret;
1166 }
1167 
1168 int parse_filter(const struct option *opt, const char *str,
1169 		 int unset __maybe_unused)
1170 {
1171 	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
1172 	struct perf_evsel *last = NULL;
1173 
1174 	if (evlist->nr_entries > 0)
1175 		last = perf_evlist__last(evlist);
1176 
1177 	if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
1178 		fprintf(stderr,
1179 			"--filter option should follow a -e tracepoint option\n");
1180 		return -1;
1181 	}
1182 
1183 	last->filter = strdup(str);
1184 	if (last->filter == NULL) {
1185 		fprintf(stderr, "not enough memory to hold filter string\n");
1186 		return -1;
1187 	}
1188 
1189 	return 0;
1190 }
1191 
/*
 * Human-readable labels printed next to each event in 'perf list'.
 * Indexed by perf_type_id: entries below are used via
 * event_type_descriptors[PERF_TYPE_TRACEPOINT], [PERF_TYPE_HW_CACHE],
 * [PERF_TYPE_RAW] and [PERF_TYPE_BREAKPOINT], so the order must match
 * the PERF_TYPE_* enumeration.
 */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};
1200 
/* qsort() comparator for an array of C strings (char *). */
static int cmp_string(const void *a, const void *b)
{
	const char * const *lhs = a;
	const char * const *rhs = b;

	return strcmp(*lhs, *rhs);
}
1208 
/*
 * Print the events from <debugfs_mount_point>/tracing/events
 *
 * Subsystem and event names may each be filtered by a glob.  Runs two
 * passes over the tracefs tree: the first only counts matching events
 * (evt_num), the second allocates evt_list and collects the names so
 * they can be sorted before printing.  With name_only, prints bare
 * "sys:event" names space-separated; otherwise one aligned line per
 * event with its type descriptor.
 */

void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
			     bool name_only)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];
	char **evt_list = NULL;
	unsigned int evt_i = 0, evt_num = 0;
	bool evt_num_known = false;	/* false: counting pass, true: collecting pass */

restart:
	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return;

	/* Second pass: we now know how many entries to allocate. */
	if (evt_num_known) {
		evt_list = zalloc(sizeof(char *) * evt_num);
		if (!evt_list)
			goto out_close_sys_dir;
	}

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		if (subsys_glob != NULL &&
		    !strglobmatch(sys_dirent.d_name, subsys_glob))
			continue;

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			if (event_glob != NULL &&
			    !strglobmatch(evt_dirent.d_name, event_glob))
				continue;

			/* First pass only counts; names collected on the second. */
			if (!evt_num_known) {
				evt_num++;
				continue;
			}

			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);

			evt_list[evt_i] = strdup(evt_path);
			if (evt_list[evt_i] == NULL)
				goto out_close_evt_dir;
			evt_i++;
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);

	/* Counting pass done: rescan the tree, this time collecting names. */
	if (!evt_num_known) {
		evt_num_known = true;
		goto restart;
	}
	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
	evt_i = 0;
	while (evt_i < evt_num) {
		if (name_only) {
			printf("%s ", evt_list[evt_i++]);
			continue;
		}
		printf("  %-50s [%s]\n", evt_list[evt_i++],
				event_type_descriptors[PERF_TYPE_TRACEPOINT]);
	}
	if (evt_num)
		printf("\n");

out_free:
	/* Free only the entries actually populated (evt_i of them). */
	evt_num = evt_i;
	for (evt_i = 0; evt_i < evt_num; evt_i++)
		zfree(&evt_list[evt_i]);
	zfree(&evt_list);
	return;

out_close_evt_dir:
	closedir(evt_dir);
out_close_sys_dir:
	closedir(sys_dir);

	printf("FATAL: not enough memory to print %s\n",
			event_type_descriptors[PERF_TYPE_TRACEPOINT]);
	if (evt_list)
		goto out_free;
}
1302 
1303 /*
1304  * Check whether event is in <debugfs_mount_point>/tracing/events
1305  */
1306 
1307 int is_valid_tracepoint(const char *event_string)
1308 {
1309 	DIR *sys_dir, *evt_dir;
1310 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
1311 	char evt_path[MAXPATHLEN];
1312 	char dir_path[MAXPATHLEN];
1313 
1314 	sys_dir = opendir(tracing_events_path);
1315 	if (!sys_dir)
1316 		return 0;
1317 
1318 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
1319 
1320 		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
1321 			 sys_dirent.d_name);
1322 		evt_dir = opendir(dir_path);
1323 		if (!evt_dir)
1324 			continue;
1325 
1326 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
1327 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
1328 				 sys_dirent.d_name, evt_dirent.d_name);
1329 			if (!strcmp(evt_path, event_string)) {
1330 				closedir(evt_dir);
1331 				closedir(sys_dir);
1332 				return 1;
1333 			}
1334 		}
1335 		closedir(evt_dir);
1336 	}
1337 	closedir(sys_dir);
1338 	return 0;
1339 }
1340 
1341 static bool is_event_supported(u8 type, unsigned config)
1342 {
1343 	bool ret = true;
1344 	int open_return;
1345 	struct perf_evsel *evsel;
1346 	struct perf_event_attr attr = {
1347 		.type = type,
1348 		.config = config,
1349 		.disabled = 1,
1350 	};
1351 	struct {
1352 		struct thread_map map;
1353 		int threads[1];
1354 	} tmap = {
1355 		.map.nr	 = 1,
1356 		.threads = { 0 },
1357 	};
1358 
1359 	evsel = perf_evsel__new(&attr);
1360 	if (evsel) {
1361 		open_return = perf_evsel__open(evsel, NULL, &tmap.map);
1362 		ret = open_return >= 0;
1363 
1364 		if (open_return == -EACCES) {
1365 			/*
1366 			 * This happens if the paranoid value
1367 			 * /proc/sys/kernel/perf_event_paranoid is set to 2
1368 			 * Re-run with exclude_kernel set; we don't do that
1369 			 * by default as some ARM machines do not support it.
1370 			 *
1371 			 */
1372 			evsel->attr.exclude_kernel = 1;
1373 			ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
1374 		}
1375 		perf_evsel__delete(evsel);
1376 	}
1377 
1378 	return ret;
1379 }
1380 
/*
 * Print the supported hardware cache events (e.g. L1-dcache-load-misses),
 * optionally filtered by @event_glob.  Only combinations the kernel
 * accepts (probed via is_event_supported()) are listed.
 *
 * Two passes: the first counts matches (evt_num), the second allocates
 * evt_list and collects the names so they can be sorted before printing.
 * Returns the number of events printed.
 */
int print_hwcache_events(const char *event_glob, bool name_only)
{
	unsigned int type, op, i, evt_i = 0, evt_num = 0;
	char name[64];
	char **evt_list = NULL;
	bool evt_num_known = false;	/* false: counting pass, true: collecting pass */

restart:
	if (evt_num_known) {
		evt_list = zalloc(sizeof(char *) * evt_num);
		if (!evt_list)
			goto out_enomem;
	}

	/* Iterate cache-type x op x result, the three config byte fields. */
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (event_glob != NULL && !strglobmatch(name, event_glob))
					continue;

				/* config encoding: type | op << 8 | result << 16 */
				if (!is_event_supported(PERF_TYPE_HW_CACHE,
							type | (op << 8) | (i << 16)))
					continue;

				if (!evt_num_known) {
					evt_num++;
					continue;
				}

				evt_list[evt_i] = strdup(name);
				if (evt_list[evt_i] == NULL)
					goto out_enomem;
				evt_i++;
			}
		}
	}

	/* Counting pass done: redo the scan, this time collecting names. */
	if (!evt_num_known) {
		evt_num_known = true;
		goto restart;
	}
	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
	evt_i = 0;
	while (evt_i < evt_num) {
		if (name_only) {
			printf("%s ", evt_list[evt_i++]);
			continue;
		}
		printf("  %-50s [%s]\n", evt_list[evt_i++],
				event_type_descriptors[PERF_TYPE_HW_CACHE]);
	}
	if (evt_num)
		printf("\n");

out_free:
	/* Free only the entries actually populated (evt_i of them). */
	evt_num = evt_i;
	for (evt_i = 0; evt_i < evt_num; evt_i++)
		zfree(&evt_list[evt_i]);
	zfree(&evt_list);
	return evt_num;

out_enomem:
	printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
	if (evt_list)
		goto out_free;
	return evt_num;
}
1454 
1455 void print_symbol_events(const char *event_glob, unsigned type,
1456 				struct event_symbol *syms, unsigned max,
1457 				bool name_only)
1458 {
1459 	unsigned int i, evt_i = 0, evt_num = 0;
1460 	char name[MAX_NAME_LEN];
1461 	char **evt_list = NULL;
1462 	bool evt_num_known = false;
1463 
1464 restart:
1465 	if (evt_num_known) {
1466 		evt_list = zalloc(sizeof(char *) * evt_num);
1467 		if (!evt_list)
1468 			goto out_enomem;
1469 		syms -= max;
1470 	}
1471 
1472 	for (i = 0; i < max; i++, syms++) {
1473 
1474 		if (event_glob != NULL &&
1475 		    !(strglobmatch(syms->symbol, event_glob) ||
1476 		      (syms->alias && strglobmatch(syms->alias, event_glob))))
1477 			continue;
1478 
1479 		if (!is_event_supported(type, i))
1480 			continue;
1481 
1482 		if (!evt_num_known) {
1483 			evt_num++;
1484 			continue;
1485 		}
1486 
1487 		if (!name_only && strlen(syms->alias))
1488 			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1489 		else
1490 			strncpy(name, syms->symbol, MAX_NAME_LEN);
1491 
1492 		evt_list[evt_i] = strdup(name);
1493 		if (evt_list[evt_i] == NULL)
1494 			goto out_enomem;
1495 		evt_i++;
1496 	}
1497 
1498 	if (!evt_num_known) {
1499 		evt_num_known = true;
1500 		goto restart;
1501 	}
1502 	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
1503 	evt_i = 0;
1504 	while (evt_i < evt_num) {
1505 		if (name_only) {
1506 			printf("%s ", evt_list[evt_i++]);
1507 			continue;
1508 		}
1509 		printf("  %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
1510 	}
1511 	if (evt_num)
1512 		printf("\n");
1513 
1514 out_free:
1515 	evt_num = evt_i;
1516 	for (evt_i = 0; evt_i < evt_num; evt_i++)
1517 		zfree(&evt_list[evt_i]);
1518 	zfree(&evt_list);
1519 	return;
1520 
1521 out_enomem:
1522 	printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
1523 	if (evt_list)
1524 		goto out_free;
1525 }
1526 
1527 /*
1528  * Print the help text for the event symbols:
1529  */
1530 void print_events(const char *event_glob, bool name_only)
1531 {
1532 	print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
1533 			    event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
1534 
1535 	print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
1536 			    event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
1537 
1538 	print_hwcache_events(event_glob, name_only);
1539 
1540 	print_pmu_events(event_glob, name_only);
1541 
1542 	if (event_glob != NULL)
1543 		return;
1544 
1545 	if (!name_only) {
1546 		printf("  %-50s [%s]\n",
1547 		       "rNNN",
1548 		       event_type_descriptors[PERF_TYPE_RAW]);
1549 		printf("  %-50s [%s]\n",
1550 		       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1551 		       event_type_descriptors[PERF_TYPE_RAW]);
1552 		printf("   (see 'man perf-list' on how to encode it)\n");
1553 		printf("\n");
1554 
1555 		printf("  %-50s [%s]\n",
1556 		       "mem:<addr>[/len][:access]",
1557 			event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1558 		printf("\n");
1559 	}
1560 
1561 	print_tracepoint_events(NULL, NULL, name_only);
1562 }
1563 
1564 int parse_events__is_hardcoded_term(struct parse_events_term *term)
1565 {
1566 	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1567 }
1568 
1569 static int new_term(struct parse_events_term **_term, int type_val,
1570 		    int type_term, char *config,
1571 		    char *str, u64 num, int err_term, int err_val)
1572 {
1573 	struct parse_events_term *term;
1574 
1575 	term = zalloc(sizeof(*term));
1576 	if (!term)
1577 		return -ENOMEM;
1578 
1579 	INIT_LIST_HEAD(&term->list);
1580 	term->type_val  = type_val;
1581 	term->type_term = type_term;
1582 	term->config = config;
1583 	term->err_term = err_term;
1584 	term->err_val  = err_val;
1585 
1586 	switch (type_val) {
1587 	case PARSE_EVENTS__TERM_TYPE_NUM:
1588 		term->val.num = num;
1589 		break;
1590 	case PARSE_EVENTS__TERM_TYPE_STR:
1591 		term->val.str = str;
1592 		break;
1593 	default:
1594 		free(term);
1595 		return -EINVAL;
1596 	}
1597 
1598 	*_term = term;
1599 	return 0;
1600 }
1601 
1602 int parse_events_term__num(struct parse_events_term **term,
1603 			   int type_term, char *config, u64 num,
1604 			   YYLTYPE *loc_term, YYLTYPE *loc_val)
1605 {
1606 	return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1607 			config, NULL, num,
1608 			loc_term ? loc_term->first_column : 0,
1609 			loc_val ? loc_val->first_column : 0);
1610 }
1611 
1612 int parse_events_term__str(struct parse_events_term **term,
1613 			   int type_term, char *config, char *str,
1614 			   YYLTYPE *loc_term, YYLTYPE *loc_val)
1615 {
1616 	return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1617 			config, str, 0,
1618 			loc_term ? loc_term->first_column : 0,
1619 			loc_val ? loc_val->first_column : 0);
1620 }
1621 
1622 int parse_events_term__sym_hw(struct parse_events_term **term,
1623 			      char *config, unsigned idx)
1624 {
1625 	struct event_symbol *sym;
1626 
1627 	BUG_ON(idx >= PERF_COUNT_HW_MAX);
1628 	sym = &event_symbols_hw[idx];
1629 
1630 	if (config)
1631 		return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
1632 				PARSE_EVENTS__TERM_TYPE_USER, config,
1633 				(char *) sym->symbol, 0, 0, 0);
1634 	else
1635 		return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
1636 				PARSE_EVENTS__TERM_TYPE_USER,
1637 				(char *) "event", (char *) sym->symbol,
1638 				0, 0, 0);
1639 }
1640 
/*
 * Duplicate @term into a freshly allocated term stored in *new.
 *
 * NOTE(review): this is a shallow copy — the config and val.str pointers
 * are reused rather than strdup()ed, so the clone shares string storage
 * with the original.  Verify at the call sites that both terms are never
 * freed along with their strings independently.
 */
int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	return new_term(new, term->type_val, term->type_term, term->config,
			term->val.str, term->val.num,
			term->err_term, term->err_val);
}
1648 
/*
 * Free every term on @terms.  Only the term structs themselves are
 * freed; term->config and term->val.str are left alone — presumably
 * they are owned elsewhere (parse_events_term__clone() shares them),
 * so freeing them here would double-free.  The list head itself is the
 * caller's.
 */
void parse_events__free_terms(struct list_head *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, terms, list)
		free(term);
}
1656 
1657 void parse_events_evlist_error(struct parse_events_evlist *data,
1658 			       int idx, const char *str)
1659 {
1660 	struct parse_events_error *err = data->error;
1661 
1662 	err->idx = idx;
1663 	err->str = strdup(str);
1664 	WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
1665 }
1666