xref: /openbmc/linux/tools/perf/util/metricgroup.c (revision 911b8eac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "expr.h"
15 #include "rblist.h"
16 #include <string.h>
17 #include <errno.h>
18 #include "pmu-events/pmu-events.h"
19 #include "strlist.h"
20 #include <assert.h>
21 #include <linux/ctype.h>
22 #include <linux/string.h>
23 #include <linux/zalloc.h>
24 #include <subcmd/parse-options.h>
25 #include <api/fs/fs.h>
26 #include "util.h"
27 #include <asm/bug.h>
28 
29 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
30 					 struct evsel *evsel,
31 					 bool create)
32 {
33 	struct rb_node *nd;
34 	struct metric_event me = {
35 		.evsel = evsel
36 	};
37 
38 	if (!metric_events)
39 		return NULL;
40 
41 	nd = rblist__find(metric_events, &me);
42 	if (nd)
43 		return container_of(nd, struct metric_event, nd);
44 	if (create) {
45 		rblist__add_node(metric_events, &me);
46 		nd = rblist__find(metric_events, &me);
47 		if (nd)
48 			return container_of(nd, struct metric_event, nd);
49 	}
50 	return NULL;
51 }
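/*
 * Usage sketch (illustrative only, not taken verbatim from this file): a
 * caller that wants to attach a metric expression to the evsel it is keyed
 * on would do something like
 *
 *	struct metric_event *me;
 *
 *	me = metricgroup__lookup(metric_events, evsel, true);
 *	if (!me)
 *		return -ENOMEM;
 *	list_add(&expr->nd, &me->head);
 *
 * i.e. the rblist maps an evsel to the list of metric expressions whose
 * events it leads.
 */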
52 
53 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
54 {
55 	struct metric_event *a = container_of(rb_node,
56 					      struct metric_event,
57 					      nd);
58 	const struct metric_event *b = entry;
59 
60 	if (a->evsel == b->evsel)
61 		return 0;
62 	if ((char *)a->evsel < (char *)b->evsel)
63 		return -1;
64 	return +1;
65 }
66 
67 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
68 					const void *entry)
69 {
70 	struct metric_event *me = malloc(sizeof(struct metric_event));
71 
72 	if (!me)
73 		return NULL;
74 	memcpy(me, entry, sizeof(struct metric_event));
75 	me->evsel = ((struct metric_event *)entry)->evsel;
76 	INIT_LIST_HEAD(&me->head);
77 	return &me->nd;
78 }
79 
80 static void metric_event_delete(struct rblist *rblist __maybe_unused,
81 				struct rb_node *rb_node)
82 {
83 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
84 	struct metric_expr *expr, *tmp;
85 
86 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
87 		free(expr->metric_refs);
88 		free(expr->metric_events);
89 		free(expr);
90 	}
91 
92 	free(me);
93 }
94 
95 static void metricgroup__rblist_init(struct rblist *metric_events)
96 {
97 	rblist__init(metric_events);
98 	metric_events->node_cmp = metric_event_cmp;
99 	metric_events->node_new = metric_event_new;
100 	metric_events->node_delete = metric_event_delete;
101 }
102 
103 void metricgroup__rblist_exit(struct rblist *metric_events)
104 {
105 	rblist__exit(metric_events);
106 }
107 
108 /*
109  * A node in the list of referenced metrics. metric_expr
110  * is held as a convenience to avoid a search through the
111  * metric list.
112  */
113 struct metric_ref_node {
114 	const char *metric_name;
115 	const char *metric_expr;
116 	struct list_head list;
117 };
118 
119 struct metric {
120 	struct list_head nd;
121 	struct expr_parse_ctx pctx;
122 	const char *metric_name;
123 	const char *metric_expr;
124 	const char *metric_unit;
125 	struct list_head metric_refs;
126 	int metric_refs_cnt;
127 	int runtime;
128 	bool has_constraint;
129 };
130 
131 #define RECURSION_ID_MAX 1000
132 
133 struct expr_ids {
134 	struct expr_id	id[RECURSION_ID_MAX];
135 	int		cnt;
136 };
137 
138 static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
139 {
140 	if (ids->cnt >= RECURSION_ID_MAX)
141 		return NULL;
142 	return &ids->id[ids->cnt++];
143 }
144 
145 static void expr_ids__exit(struct expr_ids *ids)
146 {
147 	int i;
148 
149 	for (i = 0; i < ids->cnt; i++)
150 		free(ids->id[i].id);
151 }
152 
153 /**
154  * Find a group of events in perf_evlist that correspond to those from a parsed
155  * metric expression. Note: because find_evsel_group is called in the same order
156  * in which perf_evlist was constructed, metric_no_merge doesn't need to test for
157  * underfilling a group.
158  * @perf_evlist: a list of events something like: {metric1 leader, metric1
159  * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
160  * metric2 sibling}:W,duration_time
161  * @pctx: the parse context for the metric expression.
162  * @metric_no_merge: don't attempt to share events for the metric with other
163  * metrics.
164  * @has_constraint: is there a constraint on the group of events? If so, the
165  * events won't be grouped.
166  * @metric_events: out argument, null terminated array of evsel's associated
167  * with the metric.
168  * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
169  * @return the first metric event or NULL on failure.
170  */
171 static struct evsel *find_evsel_group(struct evlist *perf_evlist,
172 				      struct expr_parse_ctx *pctx,
173 				      bool metric_no_merge,
174 				      bool has_constraint,
175 				      struct evsel **metric_events,
176 				      unsigned long *evlist_used)
177 {
178 	struct evsel *ev, *current_leader = NULL;
179 	struct expr_id_data *val_ptr;
180 	int i = 0, matched_events = 0, events_to_match;
181 	const int idnum = (int)hashmap__size(&pctx->ids);
182 
183 	/* duration_time is grouped separately. */
184 	if (!has_constraint &&
185 	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
186 		events_to_match = idnum - 1;
187 	else
188 		events_to_match = idnum;
189 
190 	evlist__for_each_entry (perf_evlist, ev) {
191 		/*
192 		 * Events with a constraint aren't grouped and match the first
193 		 * events available.
194 		 */
195 		if (has_constraint && ev->weak_group)
196 			continue;
197 		/* Ignore event if already used and merging is disabled. */
198 		if (metric_no_merge && test_bit(ev->idx, evlist_used))
199 			continue;
200 		if (!has_constraint && ev->leader != current_leader) {
201 			/*
202 			 * Start of a new group, discard the whole match and
203 			 * start again.
204 			 */
205 			matched_events = 0;
206 			memset(metric_events, 0,
207 				sizeof(struct evsel *) * idnum);
208 			current_leader = ev->leader;
209 		}
210 		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) {
211 			if (has_constraint) {
212 				/*
213 				 * Events aren't grouped, ensure the same event
214 				 * isn't matched from two groups.
215 				 */
216 				for (i = 0; i < matched_events; i++) {
217 					if (!strcmp(ev->name,
218 						    metric_events[i]->name)) {
219 						break;
220 					}
221 				}
222 				if (i != matched_events)
223 					continue;
224 			}
225 			metric_events[matched_events++] = ev;
226 		}
227 		if (matched_events == events_to_match)
228 			break;
229 	}
230 
231 	if (events_to_match != idnum) {
232 		/* Add the first duration_time. */
233 		evlist__for_each_entry(perf_evlist, ev) {
234 			if (!strcmp(ev->name, "duration_time")) {
235 				metric_events[matched_events++] = ev;
236 				break;
237 			}
238 		}
239 	}
240 
241 	if (matched_events != idnum) {
242 		/* Not a whole match. */
243 		return NULL;
244 	}
245 
246 	metric_events[idnum] = NULL;
247 
248 	for (i = 0; i < idnum; i++) {
249 		ev = metric_events[i];
250 		ev->metric_leader = ev;
251 		set_bit(ev->idx, evlist_used);
252 	}
253 
254 	return metric_events[0];
255 }
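/*
 * Worked example (hypothetical metric, for illustration): for a metric whose
 * parse context holds the ids {instructions, cycles, duration_time} and an
 * evlist built as
 *
 *	{instructions,cycles}:W,duration_time
 *
 * find_evsel_group() matches instructions and cycles inside the weak group,
 * then picks up the first duration_time outside it, marks all three evsels
 * as used in evlist_used and returns the first matched evsel as the metric
 * leader.
 */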
256 
257 static int metricgroup__setup_events(struct list_head *groups,
258 				     bool metric_no_merge,
259 				     struct evlist *perf_evlist,
260 				     struct rblist *metric_events_list)
261 {
262 	struct metric_event *me;
263 	struct metric_expr *expr;
264 	int i = 0;
265 	int ret = 0;
266 	struct metric *m;
267 	struct evsel *evsel, *tmp;
268 	unsigned long *evlist_used;
269 
270 	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
271 	if (!evlist_used)
272 		return -ENOMEM;
273 
274 	list_for_each_entry (m, groups, nd) {
275 		struct evsel **metric_events;
276 		struct metric_ref *metric_refs = NULL;
277 
278 		metric_events = calloc(sizeof(void *),
279 				hashmap__size(&m->pctx.ids) + 1);
280 		if (!metric_events) {
281 			ret = -ENOMEM;
282 			break;
283 		}
284 		evsel = find_evsel_group(perf_evlist, &m->pctx,
285 					 metric_no_merge,
286 					 m->has_constraint, metric_events,
287 					 evlist_used);
288 		if (!evsel) {
289 			pr_debug("Cannot resolve %s: %s\n",
290 					m->metric_name, m->metric_expr);
291 			free(metric_events);
292 			continue;
293 		}
294 		for (i = 0; metric_events[i]; i++)
295 			metric_events[i]->collect_stat = true;
296 		me = metricgroup__lookup(metric_events_list, evsel, true);
297 		if (!me) {
298 			ret = -ENOMEM;
299 			free(metric_events);
300 			break;
301 		}
302 		expr = malloc(sizeof(struct metric_expr));
303 		if (!expr) {
304 			ret = -ENOMEM;
305 			free(metric_events);
306 			break;
307 		}
308 
309 		/*
310 		 * Collect and store any referenced (nested) metric
311 		 * expressions for metric processing.
312 		 */
313 		if (m->metric_refs_cnt) {
314 			struct metric_ref_node *ref;
315 
316 			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
317 			if (!metric_refs) {
318 				ret = -ENOMEM;
319 				free(metric_events);
320 				free(expr);
321 				break;
322 			}
323 
324 			i = 0;
325 			list_for_each_entry(ref, &m->metric_refs, list) {
326 				/*
327 				 * Intentionally passing just const char pointers,
328 				 * originally from 'struct pmu_event' object.
329 				 * We don't need to change them, so there's no
330 				 * need to create our own copy.
331 				 */
332 				metric_refs[i].metric_name = ref->metric_name;
333 				metric_refs[i].metric_expr = ref->metric_expr;
334 				i++;
335 			}
336 		}
337 
338 		expr->metric_refs = metric_refs;
339 		expr->metric_expr = m->metric_expr;
340 		expr->metric_name = m->metric_name;
341 		expr->metric_unit = m->metric_unit;
342 		expr->metric_events = metric_events;
343 		expr->runtime = m->runtime;
344 		list_add(&expr->nd, &me->head);
345 	}
346 
347 	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
348 		if (!test_bit(evsel->idx, evlist_used)) {
349 			evlist__remove(perf_evlist, evsel);
350 			evsel__delete(evsel);
351 		}
352 	}
353 	bitmap_free(evlist_used);
354 
355 	return ret;
356 }
357 
358 static bool match_metric(const char *n, const char *list)
359 {
360 	int len;
361 	char *m;
362 
363 	if (!list)
364 		return false;
365 	if (!strcmp(list, "all"))
366 		return true;
367 	if (!n)
368 		return !strcasecmp(list, "No_group");
369 	len = strlen(list);
370 	m = strcasestr(n, list);
371 	if (!m)
372 		return false;
373 	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
374 	    (m[len] == 0 || m[len] == ';'))
375 		return true;
376 	return false;
377 }
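/*
 * Example (illustrative): with list = "TopdownL1", match_metric() returns
 * true for n = "TopdownL1" and for n = "Frontend;TopdownL1", but false for
 * n = "TopdownL1_extra", because the character following the match must be
 * either the end of the string or a ';' separator. Matching is
 * case-insensitive.
 */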
378 
379 struct mep {
380 	struct rb_node nd;
381 	const char *name;
382 	struct strlist *metrics;
383 };
384 
385 static int mep_cmp(struct rb_node *rb_node, const void *entry)
386 {
387 	struct mep *a = container_of(rb_node, struct mep, nd);
388 	struct mep *b = (struct mep *)entry;
389 
390 	return strcmp(a->name, b->name);
391 }
392 
393 static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
394 					const void *entry)
395 {
396 	struct mep *me = malloc(sizeof(struct mep));
397 
398 	if (!me)
399 		return NULL;
400 	memcpy(me, entry, sizeof(struct mep));
401 	me->name = strdup(me->name);
402 	if (!me->name)
403 		goto out_me;
404 	me->metrics = strlist__new(NULL, NULL);
405 	if (!me->metrics)
406 		goto out_name;
407 	return &me->nd;
408 out_name:
409 	zfree(&me->name);
410 out_me:
411 	free(me);
412 	return NULL;
413 }
414 
415 static struct mep *mep_lookup(struct rblist *groups, const char *name)
416 {
417 	struct rb_node *nd;
418 	struct mep me = {
419 		.name = name
420 	};
421 	nd = rblist__find(groups, &me);
422 	if (nd)
423 		return container_of(nd, struct mep, nd);
424 	rblist__add_node(groups, &me);
425 	nd = rblist__find(groups, &me);
426 	if (nd)
427 		return container_of(nd, struct mep, nd);
428 	return NULL;
429 }
430 
431 static void mep_delete(struct rblist *rl __maybe_unused,
432 		       struct rb_node *nd)
433 {
434 	struct mep *me = container_of(nd, struct mep, nd);
435 
436 	strlist__delete(me->metrics);
437 	zfree(&me->name);
438 	free(me);
439 }
440 
441 static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
442 {
443 	struct str_node *sn;
444 	int n = 0;
445 
446 	strlist__for_each_entry (sn, metrics) {
447 		if (raw)
448 			printf("%s%s", n > 0 ? " " : "", sn->s);
449 		else
450 			printf("  %s\n", sn->s);
451 		n++;
452 	}
453 	if (raw)
454 		putchar('\n');
455 }
456 
457 void metricgroup__print(bool metrics, bool metricgroups, char *filter,
458 			bool raw, bool details)
459 {
460 	struct pmu_events_map *map = perf_pmu__find_map(NULL);
461 	struct pmu_event *pe;
462 	int i;
463 	struct rblist groups;
464 	struct rb_node *node, *next;
465 	struct strlist *metriclist = NULL;
466 
467 	if (!map)
468 		return;
469 
470 	if (!metricgroups) {
471 		metriclist = strlist__new(NULL, NULL);
472 		if (!metriclist)
473 			return;
474 	}
475 
476 	rblist__init(&groups);
477 	groups.node_new = mep_new;
478 	groups.node_cmp = mep_cmp;
479 	groups.node_delete = mep_delete;
480 	for (i = 0; ; i++) {
481 		const char *g;
482 		pe = &map->table[i];
483 
484 		if (!pe->name && !pe->metric_group && !pe->metric_name)
485 			break;
486 		if (!pe->metric_expr)
487 			continue;
488 		g = pe->metric_group;
489 		if (!g && pe->metric_name) {
490 			if (pe->name)
491 				continue;
492 			g = "No_group";
493 		}
494 		if (g) {
495 			char *omg;
496 			char *mg = strdup(g);
497 
498 			if (!mg)
499 				return;
500 			omg = mg;
501 			while ((g = strsep(&mg, ";")) != NULL) {
502 				struct mep *me;
503 				char *s;
504 
505 				g = skip_spaces(g);
506 				if (*g == 0)
507 					g = "No_group";
508 				if (filter && !strstr(g, filter))
509 					continue;
510 				if (raw)
511 					s = (char *)pe->metric_name;
512 				else {
513 					if (asprintf(&s, "%s\n%*s%s]",
514 						     pe->metric_name, 8, "[", pe->desc) < 0)
515 						return;
516 
517 					if (details) {
518 						if (asprintf(&s, "%s\n%*s%s]",
519 							     s, 8, "[", pe->metric_expr) < 0)
520 							return;
521 					}
522 				}
523 
524 				if (!s)
525 					continue;
526 
527 				if (!metricgroups) {
528 					strlist__add(metriclist, s);
529 				} else {
530 					me = mep_lookup(&groups, g);
531 					if (!me)
532 						continue;
533 					strlist__add(me->metrics, s);
534 				}
535 
536 				if (!raw)
537 					free(s);
538 			}
539 			free(omg);
540 		}
541 	}
542 
543 	if (metricgroups && !raw)
544 		printf("\nMetric Groups:\n\n");
545 	else if (metrics && !raw)
546 		printf("\nMetrics:\n\n");
547 
548 	for (node = rb_first_cached(&groups.entries); node; node = next) {
549 		struct mep *me = container_of(node, struct mep, nd);
550 
551 		if (metricgroups)
552 			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
553 		if (metrics)
554 			metricgroup__print_strlist(me->metrics, raw);
555 		next = rb_next(node);
556 		rblist__remove_node(&groups, node);
557 	}
558 	if (!metricgroups)
559 		metricgroup__print_strlist(metriclist, raw);
560 	strlist__delete(metriclist);
561 }
562 
563 static void metricgroup__add_metric_weak_group(struct strbuf *events,
564 					       struct expr_parse_ctx *ctx)
565 {
566 	struct hashmap_entry *cur;
567 	size_t bkt;
568 	bool no_group = true, has_duration = false;
569 
570 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
571 		pr_debug("found event %s\n", (const char *)cur->key);
572 		/*
573 		 * Duration time maps to a software event and can make
574 		 * groups not count. Always use it outside a
575 		 * group.
576 		 */
577 		if (!strcmp(cur->key, "duration_time")) {
578 			has_duration = true;
579 			continue;
580 		}
581 		strbuf_addf(events, "%s%s",
582 			no_group ? "{" : ",",
583 			(const char *)cur->key);
584 		no_group = false;
585 	}
586 	if (!no_group) {
587 		strbuf_addf(events, "}:W");
588 		if (has_duration)
589 			strbuf_addf(events, ",duration_time");
590 	} else if (has_duration)
591 		strbuf_addf(events, "duration_time");
592 }
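/*
 * For example (event names are illustrative): a metric whose parse context
 * contains {inst_retired.any, cpu_clk_unhalted.thread, duration_time} is
 * emitted as
 *
 *	{inst_retired.any,cpu_clk_unhalted.thread}:W,duration_time
 *
 * i.e. the hardware events form a weak group while duration_time, being a
 * software event, is kept outside of it.
 */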
593 
594 static void metricgroup__add_metric_non_group(struct strbuf *events,
595 					      struct expr_parse_ctx *ctx)
596 {
597 	struct hashmap_entry *cur;
598 	size_t bkt;
599 	bool first = true;
600 
601 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
602 		if (!first)
603 			strbuf_addf(events, ",");
604 		strbuf_addf(events, "%s", (const char *)cur->key);
605 		first = false;
606 	}
607 }
608 
609 static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
610 {
611 	static bool violate_nmi_constraint;
612 
613 	if (!foot) {
614 		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
615 		violate_nmi_constraint = true;
616 		return;
617 	}
618 
619 	if (!violate_nmi_constraint)
620 		return;
621 
622 	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
623 		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
624 		   "    perf stat ...\n"
625 		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
626 }
627 
628 static bool metricgroup__has_constraint(struct pmu_event *pe)
629 {
630 	if (!pe->metric_constraint)
631 		return false;
632 
633 	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
634 	    sysctl__nmi_watchdog_enabled()) {
635 		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
636 		return true;
637 	}
638 
639 	return false;
640 }
641 
642 int __weak arch_get_runtimeparam(void)
643 {
644 	return 1;
645 }
646 
647 static int __add_metric(struct list_head *metric_list,
648 			struct pmu_event *pe,
649 			bool metric_no_group,
650 			int runtime,
651 			struct metric **mp,
652 			struct expr_id *parent,
653 			struct expr_ids *ids)
654 {
655 	struct metric_ref_node *ref;
656 	struct metric *m;
657 
658 	if (*mp == NULL) {
659 		/*
660 		 * We got here for the parent group:
661 		 * allocate it and put it on the list.
662 		 */
663 		m = zalloc(sizeof(*m));
664 		if (!m)
665 			return -ENOMEM;
666 
667 		expr__ctx_init(&m->pctx);
668 		m->metric_name = pe->metric_name;
669 		m->metric_expr = pe->metric_expr;
670 		m->metric_unit = pe->unit;
671 		m->runtime = runtime;
672 		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
673 		INIT_LIST_HEAD(&m->metric_refs);
674 		m->metric_refs_cnt = 0;
675 
676 		parent = expr_ids__alloc(ids);
677 		if (!parent) {
678 			free(m);
679 			return -EINVAL;
680 		}
681 
682 		parent->id = strdup(pe->metric_name);
683 		if (!parent->id) {
684 			free(m);
685 			return -ENOMEM;
686 		}
687 		*mp = m;
688 	} else {
689 		/*
690 		 * We got here for the referenced metric, via the
691 		 * recursive metricgroup__add_metric call; add
692 		 * it to the parent group.
693 		 */
694 		m = *mp;
695 
696 		ref = malloc(sizeof(*ref));
697 		if (!ref)
698 			return -ENOMEM;
699 
700 		/*
701 		 * Intentionally passing just const char pointers,
702 		 * from 'pe' object, so they never go away. We don't
703 		 * need to change them, so there's no need to create
704 		 * our own copy.
705 		 */
706 		ref->metric_name = pe->metric_name;
707 		ref->metric_expr = pe->metric_expr;
708 
709 		list_add(&ref->list, &m->metric_refs);
710 		m->metric_refs_cnt++;
711 	}
712 
713 	/* Force all found IDs in metric to have us as parent ID. */
714 	WARN_ON_ONCE(!parent);
715 	m->pctx.parent = parent;
716 
717 	/*
718 	 * For both the parent and referenced metrics, we parse
719 	 * all the metric's IDs and add them to the parent context.
720 	 */
721 	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
722 		if (m->metric_refs_cnt == 0) {
723 			expr__ctx_clear(&m->pctx);
724 			free(m);
725 			*mp = NULL;
726 		}
727 		return -EINVAL;
728 	}
729 
730 	/*
731 	 * We add the new group only in the 'parent' call,
732 	 * so bail out in the referenced-metric case.
733 	 */
734 	if (m->metric_refs_cnt)
735 		return 0;
736 
737 	if (list_empty(metric_list))
738 		list_add(&m->nd, metric_list);
739 	else {
740 		struct list_head *pos;
741 
742 		/* Place the largest groups at the front. */
743 		list_for_each_prev(pos, metric_list) {
744 			struct metric *old = list_entry(pos, struct metric, nd);
745 
746 			if (hashmap__size(&m->pctx.ids) <=
747 			    hashmap__size(&old->pctx.ids))
748 				break;
749 		}
750 		list_add(&m->nd, pos);
751 	}
752 
753 	return 0;
754 }
755 
756 #define map_for_each_event(__pe, __idx, __map)				\
757 	for (__idx = 0, __pe = &__map->table[__idx];			\
758 	     __pe->name || __pe->metric_group || __pe->metric_name;	\
759 	     __pe = &__map->table[++__idx])
760 
761 #define map_for_each_metric(__pe, __idx, __map, __metric)		\
762 	map_for_each_event(__pe, __idx, __map)				\
763 		if (__pe->metric_expr &&				\
764 		    (match_metric(__pe->metric_group, __metric) ||	\
765 		     match_metric(__pe->metric_name, __metric)))
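/*
 * Usage sketch: find_metric() below walks the pmu_events_map table with
 * map_for_each_event() until the all-NULL sentinel entry, while
 * metricgroup__add_metric() uses map_for_each_metric() to visit only the
 * entries whose metric_group or metric_name matches the requested metric.
 */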
766 
767 static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
768 {
769 	struct pmu_event *pe;
770 	int i;
771 
772 	map_for_each_event(pe, i, map) {
773 		if (match_metric(pe->metric_name, metric))
774 			return pe;
775 	}
776 
777 	return NULL;
778 }
779 
780 static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
781 			   struct expr_ids *ids)
782 {
783 	struct expr_id_data *data;
784 	struct expr_id *p;
785 	int ret;
786 
787 	/*
788 	 * We get the parent referenced by the 'id' argument and
789 	 * traverse through all the parent object IDs to check
790 	 * whether we already processed 'id'. If we did, it's
791 	 * recursion and we fail.
792 	 */
793 	ret = expr__get_id(&m->pctx, id, &data);
794 	if (ret)
795 		return ret;
796 
797 	p = data->parent;
798 
799 	while (p->parent) {
800 		if (!strcmp(p->id, id)) {
801 			pr_err("failed: recursion detected for %s\n", id);
802 			return -1;
803 		}
804 		p = p->parent;
805 	}
806 
807 	/*
808 	 * If we are over the limit of static entries, the metric
809 	 * is too difficult/nested to process, fail as well.
810 	 */
811 	p = expr_ids__alloc(ids);
812 	if (!p) {
813 		pr_err("failed: too many nested metrics\n");
814 		return -EINVAL;
815 	}
816 
817 	p->id     = strdup(id);
818 	p->parent = data->parent;
819 	*parent   = p;
820 
821 	return p->id ? 0 : -ENOMEM;
822 }
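/*
 * Example of what recursion_check() rejects (hypothetical JSON metrics): if
 * metric FOO is defined as "bar + 1" and metric BAR as "foo + 1", resolving
 * FOO walks FOO -> BAR -> FOO; once an id being resolved already appears in
 * its chain of parent ids, the lookup fails with "recursion detected"
 * instead of looping forever.
 */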
823 
824 static int add_metric(struct list_head *metric_list,
825 		      struct pmu_event *pe,
826 		      bool metric_no_group,
827 		      struct metric **mp,
828 		      struct expr_id *parent,
829 		      struct expr_ids *ids);
830 
831 static int __resolve_metric(struct metric *m,
832 			    bool metric_no_group,
833 			    struct list_head *metric_list,
834 			    struct pmu_events_map *map,
835 			    struct expr_ids *ids)
836 {
837 	struct hashmap_entry *cur;
838 	size_t bkt;
839 	bool all;
840 	int ret;
841 
842 	/*
843 	 * Iterate all the parsed IDs and, if there's a metric,
844 	 * add it to the context.
845 	 */
846 	do {
847 		all = true;
848 		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
849 			struct expr_id *parent;
850 			struct pmu_event *pe;
851 
852 			pe = find_metric(cur->key, map);
853 			if (!pe)
854 				continue;
855 
856 			ret = recursion_check(m, cur->key, &parent, ids);
857 			if (ret)
858 				return ret;
859 
860 			all = false;
861 			/* The metric key itself needs to go out... */
862 			expr__del_id(&m->pctx, cur->key);
863 
864 			/* ... and it gets resolved to the parent context. */
865 			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
866 			if (ret)
867 				return ret;
868 
869 			/*
870 			 * We added a new metric to the hashmap, so we need
871 			 * to break the iteration and start over.
872 			 */
873 			break;
874 		}
875 	} while (!all);
876 
877 	return 0;
878 }
879 
880 static int resolve_metric(bool metric_no_group,
881 			  struct list_head *metric_list,
882 			  struct pmu_events_map *map,
883 			  struct expr_ids *ids)
884 {
885 	struct metric *m;
886 	int err;
887 
888 	list_for_each_entry(m, metric_list, nd) {
889 		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
890 		if (err)
891 			return err;
892 	}
893 	return 0;
894 }
895 
896 static int add_metric(struct list_head *metric_list,
897 		      struct pmu_event *pe,
898 		      bool metric_no_group,
899 		      struct metric **m,
900 		      struct expr_id *parent,
901 		      struct expr_ids *ids)
902 {
903 	struct metric *orig = *m;
904 	int ret = 0;
905 
906 	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
907 
908 	if (!strstr(pe->metric_expr, "?")) {
909 		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
910 	} else {
911 		int j, count;
912 
913 		count = arch_get_runtimeparam();
914 
915 		/*
916 		 * Instantiate the metric once per runtime parameter value
917 		 * (0..count-1) and add each instance to metric_list.
918 		 */
919 
920 		for (j = 0; j < count && !ret; j++, *m = orig)
921 			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
922 	}
923 
924 	return ret;
925 }
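/*
 * Sketch of the runtime-parameter handling above (metric name and expression
 * are made up): an expression such as "MEM_BW?" that contains a '?' is
 * instantiated once for each value 0..count-1 returned by
 * arch_get_runtimeparam(), so one struct metric per runtime value ends up on
 * metric_list; plain expressions are added exactly once with runtime = 1.
 */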
926 
927 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
928 				   struct strbuf *events,
929 				   struct list_head *metric_list,
930 				   struct pmu_events_map *map)
931 {
932 	struct expr_ids ids = { .cnt = 0, };
933 	struct pmu_event *pe;
934 	struct metric *m;
935 	LIST_HEAD(list);
936 	int i, ret;
937 	bool has_match = false;
938 
939 	map_for_each_metric(pe, i, map, metric) {
940 		has_match = true;
941 		m = NULL;
942 
943 		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
944 		if (ret)
945 			goto out;
946 
947 		/*
948 		 * Process any possible referenced metrics
949 		 * included in the expression.
950 		 */
951 		ret = resolve_metric(metric_no_group,
952 				     &list, map, &ids);
953 		if (ret)
954 			goto out;
955 	}
956 
957 	/* End of pmu events. */
958 	if (!has_match) {
959 		ret = -EINVAL;
960 		goto out;
961 	}
962 
963 	list_for_each_entry(m, &list, nd) {
964 		if (events->len > 0)
965 			strbuf_addf(events, ",");
966 
967 		if (m->has_constraint) {
968 			metricgroup__add_metric_non_group(events,
969 							  &m->pctx);
970 		} else {
971 			metricgroup__add_metric_weak_group(events,
972 							   &m->pctx);
973 		}
974 	}
975 
976 out:
977 	/*
978 	 * Splice onto metric_list so that the metrics can be
979 	 * released even if this call failed.
980 	 */
981 	list_splice(&list, metric_list);
982 	expr_ids__exit(&ids);
983 	return ret;
984 }
985 
986 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
987 					struct strbuf *events,
988 					struct list_head *metric_list,
989 					struct pmu_events_map *map)
990 {
991 	char *llist, *nlist, *p;
992 	int ret = -EINVAL;
993 
994 	nlist = strdup(list);
995 	if (!nlist)
996 		return -ENOMEM;
997 	llist = nlist;
998 
999 	strbuf_init(events, 100);
1000 	strbuf_addf(events, "%s", "");
1001 
1002 	while ((p = strsep(&llist, ",")) != NULL) {
1003 		ret = metricgroup__add_metric(p, metric_no_group, events,
1004 					      metric_list, map);
1005 		if (ret == -EINVAL) {
1006 			fprintf(stderr, "Cannot find metric or group `%s'\n",
1007 					p);
1008 			break;
1009 		}
1010 	}
1011 	free(nlist);
1012 
1013 	if (!ret)
1014 		metricgroup___watchdog_constraint_hint(NULL, true);
1015 
1016 	return ret;
1017 }
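/*
 * For example (metric names are illustrative), a command line such as
 *
 *	perf stat -M IPC,TopdownL1 ...
 *
 * reaches this function with list = "IPC,TopdownL1"; each comma-separated
 * token is handed to metricgroup__add_metric(), which accepts either a
 * metric name or a metric group name, and the resulting event strings are
 * accumulated into 'events'.
 */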
1018 
1019 static void metric__free_refs(struct metric *metric)
1020 {
1021 	struct metric_ref_node *ref, *tmp;
1022 
1023 	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
1024 		list_del(&ref->list);
1025 		free(ref);
1026 	}
1027 }
1028 
1029 static void metricgroup__free_metrics(struct list_head *metric_list)
1030 {
1031 	struct metric *m, *tmp;
1032 
1033 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1034 		metric__free_refs(m);
1035 		expr__ctx_clear(&m->pctx);
1036 		list_del_init(&m->nd);
1037 		free(m);
1038 	}
1039 }
1040 
1041 static int parse_groups(struct evlist *perf_evlist, const char *str,
1042 			bool metric_no_group,
1043 			bool metric_no_merge,
1044 			struct perf_pmu *fake_pmu,
1045 			struct rblist *metric_events,
1046 			struct pmu_events_map *map)
1047 {
1048 	struct parse_events_error parse_error;
1049 	struct strbuf extra_events;
1050 	LIST_HEAD(metric_list);
1051 	int ret;
1052 
1053 	if (metric_events->nr_entries == 0)
1054 		metricgroup__rblist_init(metric_events);
1055 	ret = metricgroup__add_metric_list(str, metric_no_group,
1056 					   &extra_events, &metric_list, map);
1057 	if (ret)
1058 		goto out;
1059 	pr_debug("adding %s\n", extra_events.buf);
1060 	bzero(&parse_error, sizeof(parse_error));
1061 	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
1062 	if (ret) {
1063 		parse_events_print_error(&parse_error, extra_events.buf);
1064 		goto out;
1065 	}
1066 	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
1067 					perf_evlist, metric_events);
1068 out:
1069 	metricgroup__free_metrics(&metric_list);
1070 	strbuf_release(&extra_events);
1071 	return ret;
1072 }
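/*
 * parse_groups() ties the steps above together: metricgroup__add_metric_list()
 * turns the requested metric names into an event string, __parse_events()
 * adds those events to perf_evlist, and metricgroup__setup_events() then maps
 * the parsed evsels back onto each metric's expression via metric_events.
 */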
1073 
1074 int metricgroup__parse_groups(const struct option *opt,
1075 			      const char *str,
1076 			      bool metric_no_group,
1077 			      bool metric_no_merge,
1078 			      struct rblist *metric_events)
1079 {
1080 	struct evlist *perf_evlist = *(struct evlist **)opt->value;
1081 	struct pmu_events_map *map = perf_pmu__find_map(NULL);
1082 
1083 	if (!map)
1084 		return 0;
1085 
1086 	return parse_groups(perf_evlist, str, metric_no_group,
1087 			    metric_no_merge, NULL, metric_events, map);
1088 }
1089 
1090 int metricgroup__parse_groups_test(struct evlist *evlist,
1091 				   struct pmu_events_map *map,
1092 				   const char *str,
1093 				   bool metric_no_group,
1094 				   bool metric_no_merge,
1095 				   struct rblist *metric_events)
1096 {
1097 	return parse_groups(evlist, str, metric_no_group,
1098 			    metric_no_merge, &perf_pmu__fake, metric_events, map);
1099 }
1100 
1101 bool metricgroup__has_metric(const char *metric)
1102 {
1103 	struct pmu_events_map *map = perf_pmu__find_map(NULL);
1104 	struct pmu_event *pe;
1105 	int i;
1106 
1107 	if (!map)
1108 		return false;
1109 
1110 	for (i = 0; ; i++) {
1111 		pe = &map->table[i];
1112 
1113 		if (!pe->name && !pe->metric_group && !pe->metric_name)
1114 			break;
1115 		if (!pe->metric_expr)
1116 			continue;
1117 		if (match_metric(pe->metric_name, metric))
1118 			return true;
1119 	}
1120 	return false;
1121 }
1122