xref: /openbmc/linux/tools/perf/util/metricgroup.c (revision fc772314)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include "pmu-events/pmu-events.h"
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
28 
29 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
30 					 struct evsel *evsel,
31 					 bool create)
32 {
33 	struct rb_node *nd;
34 	struct metric_event me = {
35 		.evsel = evsel
36 	};
37 
38 	if (!metric_events)
39 		return NULL;
40 
41 	nd = rblist__find(metric_events, &me);
42 	if (nd)
43 		return container_of(nd, struct metric_event, nd);
44 	if (create) {
45 		rblist__add_node(metric_events, &me);
46 		nd = rblist__find(metric_events, &me);
47 		if (nd)
48 			return container_of(nd, struct metric_event, nd);
49 	}
50 	return NULL;
51 }
52 
53 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
54 {
55 	struct metric_event *a = container_of(rb_node,
56 					      struct metric_event,
57 					      nd);
58 	const struct metric_event *b = entry;
59 
60 	if (a->evsel == b->evsel)
61 		return 0;
62 	if ((char *)a->evsel < (char *)b->evsel)
63 		return -1;
64 	return +1;
65 }
66 
67 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
68 					const void *entry)
69 {
70 	struct metric_event *me = malloc(sizeof(struct metric_event));
71 
72 	if (!me)
73 		return NULL;
74 	memcpy(me, entry, sizeof(struct metric_event));
75 	me->evsel = ((struct metric_event *)entry)->evsel;
76 	INIT_LIST_HEAD(&me->head);
77 	return &me->nd;
78 }
79 
80 static void metric_event_delete(struct rblist *rblist __maybe_unused,
81 				struct rb_node *rb_node)
82 {
83 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
84 	struct metric_expr *expr, *tmp;
85 
86 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
87 		free(expr->metric_refs);
88 		free(expr);
89 	}
90 
91 	free(me);
92 }
93 
94 static void metricgroup__rblist_init(struct rblist *metric_events)
95 {
96 	rblist__init(metric_events);
97 	metric_events->node_cmp = metric_event_cmp;
98 	metric_events->node_new = metric_event_new;
99 	metric_events->node_delete = metric_event_delete;
100 }
101 
/* Release all nodes; counterpart of metricgroup__rblist_init(). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
106 
/*
 * A node in the list of referenced metrics. metric_expr
 * is held as a convenience to avoid a search through the
 * metric list.
 */
struct metric_ref_node {
	const char *metric_name;
	const char *metric_expr;
	struct list_head list;	/* linked into struct metric::metric_refs */
};

/* One metric selected for evaluation; lives on a metric list. */
struct metric {
	struct list_head nd;		/* node in the metric list */
	struct expr_parse_ctx pctx;	/* parsed IDs of the expression */
	const char *metric_name;
	const char *metric_expr;
	const char *metric_unit;
	struct list_head metric_refs;	/* list of struct metric_ref_node */
	int metric_refs_cnt;		/* number of entries on metric_refs */
	int runtime;			/* value substituted for '?' in the expression */
	bool has_constraint;		/* events must not be grouped */
};

/* Upper bound on expr_id slots handed out while resolving one metric. */
#define RECURSION_ID_MAX 1000

struct expr_ids {
	struct expr_id	id[RECURSION_ID_MAX];
	int		cnt;	/* number of slots already allocated */
};
136 
137 static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
138 {
139 	if (ids->cnt >= RECURSION_ID_MAX)
140 		return NULL;
141 	return &ids->id[ids->cnt++];
142 }
143 
144 static void expr_ids__exit(struct expr_ids *ids)
145 {
146 	int i;
147 
148 	for (i = 0; i < ids->cnt; i++)
149 		free(ids->id[i].id);
150 }
151 
/**
 * Find a group of events in perf_evlist that correspond to those from a parsed
 * metric expression. Note, as find_evsel_group is called in the same order as
 * perf_evlist was constructed, metric_no_merge doesn't need to test for
 * underfilling a group.
 * @perf_evlist: a list of events something like: {metric1 leader, metric1
 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
 * metric2 sibling}:W,duration_time
 * @pctx: the parse context for the metric expression.
 * @metric_no_merge: don't attempt to share events for the metric with other
 * metrics.
 * @has_constraint: is there a constraint on the group of events? In which case
 * the events won't be grouped.
 * @metric_events: out argument, null terminated array of evsel's associated
 * with the metric.
 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
 * @return the first metric event or NULL on failure.
 */
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
				      struct expr_parse_ctx *pctx,
				      bool metric_no_merge,
				      bool has_constraint,
				      struct evsel **metric_events,
				      unsigned long *evlist_used)
{
	struct evsel *ev, *current_leader = NULL;
	struct expr_id_data *val_ptr;
	int i = 0, matched_events = 0, events_to_match;
	const int idnum = (int)hashmap__size(&pctx->ids);

	/* duration_time is grouped separately. */
	if (!has_constraint &&
	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
		events_to_match = idnum - 1;
	else
		events_to_match = idnum;

	evlist__for_each_entry (perf_evlist, ev) {
		/*
		 * Events with a constraint aren't grouped and match the first
		 * events available.
		 */
		if (has_constraint && ev->weak_group)
			continue;
		/* Ignore event if already used and merging is disabled. */
		if (metric_no_merge && test_bit(ev->idx, evlist_used))
			continue;
		if (!has_constraint && ev->leader != current_leader) {
			/*
			 * Start of a new group, discard the whole match and
			 * start again.
			 */
			matched_events = 0;
			memset(metric_events, 0,
				sizeof(struct evsel *) * idnum);
			current_leader = ev->leader;
		}
		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) {
			if (has_constraint) {
				/*
				 * Events aren't grouped, ensure the same event
				 * isn't matched from two groups.
				 */
				for (i = 0; i < matched_events; i++) {
					if (!strcmp(ev->name,
						    metric_events[i]->name)) {
						break;
					}
				}
				if (i != matched_events)
					continue;
			}
			metric_events[matched_events++] = ev;
		}
		if (matched_events == events_to_match)
			break;
	}

	if (events_to_match != idnum) {
		/* Add the first duration_time. */
		evlist__for_each_entry(perf_evlist, ev) {
			if (!strcmp(ev->name, "duration_time")) {
				metric_events[matched_events++] = ev;
				break;
			}
		}
	}

	if (matched_events != idnum) {
		/* Not whole match */
		return NULL;
	}

	metric_events[idnum] = NULL;

	/* Claim the matched events: each leads itself and is marked used. */
	for (i = 0; i < idnum; i++) {
		ev = metric_events[i];
		ev->metric_leader = ev;
		set_bit(ev->idx, evlist_used);
	}

	return metric_events[0];
}
255 
256 static int metricgroup__setup_events(struct list_head *groups,
257 				     bool metric_no_merge,
258 				     struct evlist *perf_evlist,
259 				     struct rblist *metric_events_list)
260 {
261 	struct metric_event *me;
262 	struct metric_expr *expr;
263 	int i = 0;
264 	int ret = 0;
265 	struct metric *m;
266 	struct evsel *evsel, *tmp;
267 	unsigned long *evlist_used;
268 
269 	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
270 	if (!evlist_used)
271 		return -ENOMEM;
272 
273 	list_for_each_entry (m, groups, nd) {
274 		struct evsel **metric_events;
275 		struct metric_ref *metric_refs = NULL;
276 
277 		metric_events = calloc(sizeof(void *),
278 				hashmap__size(&m->pctx.ids) + 1);
279 		if (!metric_events) {
280 			ret = -ENOMEM;
281 			break;
282 		}
283 		evsel = find_evsel_group(perf_evlist, &m->pctx,
284 					 metric_no_merge,
285 					 m->has_constraint, metric_events,
286 					 evlist_used);
287 		if (!evsel) {
288 			pr_debug("Cannot resolve %s: %s\n",
289 					m->metric_name, m->metric_expr);
290 			free(metric_events);
291 			continue;
292 		}
293 		for (i = 0; metric_events[i]; i++)
294 			metric_events[i]->collect_stat = true;
295 		me = metricgroup__lookup(metric_events_list, evsel, true);
296 		if (!me) {
297 			ret = -ENOMEM;
298 			free(metric_events);
299 			break;
300 		}
301 		expr = malloc(sizeof(struct metric_expr));
302 		if (!expr) {
303 			ret = -ENOMEM;
304 			free(metric_events);
305 			break;
306 		}
307 
308 		/*
309 		 * Collect and store collected nested expressions
310 		 * for metric processing.
311 		 */
312 		if (m->metric_refs_cnt) {
313 			struct metric_ref_node *ref;
314 
315 			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
316 			if (!metric_refs) {
317 				ret = -ENOMEM;
318 				free(metric_events);
319 				break;
320 			}
321 
322 			i = 0;
323 			list_for_each_entry(ref, &m->metric_refs, list) {
324 				/*
325 				 * Intentionally passing just const char pointers,
326 				 * originally from 'struct pmu_event' object.
327 				 * We don't need to change them, so there's no
328 				 * need to create our own copy.
329 				 */
330 				metric_refs[i].metric_name = ref->metric_name;
331 				metric_refs[i].metric_expr = ref->metric_expr;
332 				i++;
333 			}
334 		};
335 
336 		expr->metric_refs = metric_refs;
337 		expr->metric_expr = m->metric_expr;
338 		expr->metric_name = m->metric_name;
339 		expr->metric_unit = m->metric_unit;
340 		expr->metric_events = metric_events;
341 		expr->runtime = m->runtime;
342 		list_add(&expr->nd, &me->head);
343 	}
344 
345 	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
346 		if (!test_bit(evsel->idx, evlist_used)) {
347 			evlist__remove(perf_evlist, evsel);
348 			evsel__delete(evsel);
349 		}
350 	}
351 	bitmap_free(evlist_used);
352 
353 	return ret;
354 }
355 
/*
 * Does metric/group name @n match the user-supplied @list entry?
 * "all" matches anything; a NULL name only matches "No_group"; otherwise
 * @list must appear in @n case-insensitively on a ';'-separated boundary.
 */
static bool match_metric(const char *n, const char *list)
{
	const char *p, *hit = NULL;
	int len;

	if (!list)
		return false;
	if (!strcmp(list, "all"))
		return true;
	if (!n)
		return !strcasecmp(list, "No_group");

	len = strlen(list);
	/* Case-insensitive substring search for @list within @n. */
	for (p = n; ; p++) {
		if (!strncasecmp(p, list, len)) {
			hit = p;
			break;
		}
		if (!*p)
			break;
	}
	if (!hit)
		return false;
	/* The match must start right after a separator (or at the start)... */
	if (hit != n && hit[-1] != ';' && hit[-1] != ' ')
		return false;
	/* ...and end at a separator or the end of the string. */
	return hit[len] == 0 || hit[len] == ';';
}
376 
/* One metric group for listing: its name plus the metrics it contains. */
struct mep {
	struct rb_node nd;	/* node in the rblist of groups */
	const char *name;	/* group name, strdup()ed in mep_new() */
	struct strlist *metrics;	/* formatted metric strings */
};
382 
383 static int mep_cmp(struct rb_node *rb_node, const void *entry)
384 {
385 	struct mep *a = container_of(rb_node, struct mep, nd);
386 	struct mep *b = (struct mep *)entry;
387 
388 	return strcmp(a->name, b->name);
389 }
390 
391 static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
392 					const void *entry)
393 {
394 	struct mep *me = malloc(sizeof(struct mep));
395 
396 	if (!me)
397 		return NULL;
398 	memcpy(me, entry, sizeof(struct mep));
399 	me->name = strdup(me->name);
400 	if (!me->name)
401 		goto out_me;
402 	me->metrics = strlist__new(NULL, NULL);
403 	if (!me->metrics)
404 		goto out_name;
405 	return &me->nd;
406 out_name:
407 	zfree(&me->name);
408 out_me:
409 	free(me);
410 	return NULL;
411 }
412 
413 static struct mep *mep_lookup(struct rblist *groups, const char *name)
414 {
415 	struct rb_node *nd;
416 	struct mep me = {
417 		.name = name
418 	};
419 	nd = rblist__find(groups, &me);
420 	if (nd)
421 		return container_of(nd, struct mep, nd);
422 	rblist__add_node(groups, &me);
423 	nd = rblist__find(groups, &me);
424 	if (nd)
425 		return container_of(nd, struct mep, nd);
426 	return NULL;
427 }
428 
429 static void mep_delete(struct rblist *rl __maybe_unused,
430 		       struct rb_node *nd)
431 {
432 	struct mep *me = container_of(nd, struct mep, nd);
433 
434 	strlist__delete(me->metrics);
435 	zfree(&me->name);
436 	free(me);
437 }
438 
439 static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
440 {
441 	struct str_node *sn;
442 	int n = 0;
443 
444 	strlist__for_each_entry (sn, metrics) {
445 		if (raw)
446 			printf("%s%s", n > 0 ? " " : "", sn->s);
447 		else
448 			printf("  %s\n", sn->s);
449 		n++;
450 	}
451 	if (raw)
452 		putchar('\n');
453 }
454 
/*
 * List the metrics and/or metric groups of the current CPU's event map.
 * @filter: if set, only groups whose name contains it are shown.
 * @raw: single-line output without descriptions.
 * @details: additionally print each metric's expression.
 */
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
			bool raw, bool details)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!map)
		return;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	for (i = 0; ; i++) {
		const char *g;
		pe = &map->table[i];

		/* The table ends with an all-empty sentinel entry. */
		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		g = pe->metric_group;
		if (!g && pe->metric_name) {
			if (pe->name)
				continue;
			g = "No_group";
		}
		if (g) {
			char *omg;
			char *mg = strdup(g);

			if (!mg)
				return;
			omg = mg;
			/* A metric may belong to several ';'-separated groups. */
			while ((g = strsep(&mg, ";")) != NULL) {
				struct mep *me;
				char *s;

				g = skip_spaces(g);
				if (*g == 0)
					g = "No_group";
				if (filter && !strstr(g, filter))
					continue;
				if (raw)
					s = (char *)pe->metric_name;
				else {
					/*
					 * NOTE(review): the early returns below
					 * appear to leak 'omg' (and 's' in the
					 * details case) - confirm upstream.
					 */
					if (asprintf(&s, "%s\n%*s%s]",
						     pe->metric_name, 8, "[", pe->desc) < 0)
						return;

					if (details) {
						if (asprintf(&s, "%s\n%*s%s]",
							     s, 8, "[", pe->metric_expr) < 0)
							return;
					}
				}

				if (!s)
					continue;

				if (!metricgroups) {
					strlist__add(metriclist, s);
				} else {
					me = mep_lookup(&groups, g);
					if (!me)
						continue;
					strlist__add(me->metrics, s);
				}
			}
			free(omg);
		}
	}

	if (metricgroups && !raw)
		printf("\nMetric Groups:\n\n");
	else if (metrics && !raw)
		printf("\nMetrics:\n\n");

	/* Print and tear down each group node as we go. */
	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}
557 
558 static void metricgroup__add_metric_weak_group(struct strbuf *events,
559 					       struct expr_parse_ctx *ctx)
560 {
561 	struct hashmap_entry *cur;
562 	size_t bkt;
563 	bool no_group = true, has_duration = false;
564 
565 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
566 		pr_debug("found event %s\n", (const char *)cur->key);
567 		/*
568 		 * Duration time maps to a software event and can make
569 		 * groups not count. Always use it outside a
570 		 * group.
571 		 */
572 		if (!strcmp(cur->key, "duration_time")) {
573 			has_duration = true;
574 			continue;
575 		}
576 		strbuf_addf(events, "%s%s",
577 			no_group ? "{" : ",",
578 			(const char *)cur->key);
579 		no_group = false;
580 	}
581 	if (!no_group) {
582 		strbuf_addf(events, "}:W");
583 		if (has_duration)
584 			strbuf_addf(events, ",duration_time");
585 	} else if (has_duration)
586 		strbuf_addf(events, "duration_time");
587 }
588 
589 static void metricgroup__add_metric_non_group(struct strbuf *events,
590 					      struct expr_parse_ctx *ctx)
591 {
592 	struct hashmap_entry *cur;
593 	size_t bkt;
594 	bool first = true;
595 
596 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
597 		if (!first)
598 			strbuf_addf(events, ",");
599 		strbuf_addf(events, "%s", (const char *)cur->key);
600 		first = false;
601 	}
602 }
603 
/*
 * Warn about the NO_NMI_WATCHDOG constraint: the header call (!@foot)
 * announces the split and latches a flag; the footer call (@foot) prints
 * the remediation hint only if a split actually happened.
 */
static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
	} else if (violate_nmi_constraint) {
		pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
			   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
			   "    perf stat ...\n"
			   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
	}
}
622 
623 static bool metricgroup__has_constraint(struct pmu_event *pe)
624 {
625 	if (!pe->metric_constraint)
626 		return false;
627 
628 	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
629 	    sysctl__nmi_watchdog_enabled()) {
630 		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
631 		return true;
632 	}
633 
634 	return false;
635 }
636 
/*
 * Number of runtime values for '?'-parameterized metric expressions.
 * Weak default of 1; architectures may override it.
 */
int __weak arch_get_runtimeparam(void)
{
	return 1;
}
641 
/*
 * Add the metric described by @pe. When *mp is NULL this is the parent
 * metric: allocate it, give it an expr_id and insert it into @metric_list
 * (largest event sets first). Otherwise @pe is a referenced metric and is
 * recorded on the parent's metric_refs list. In both cases the
 * expression's IDs are parsed into the parent context with @runtime
 * substituted for '?'.
 */
static int __add_metric(struct list_head *metric_list,
			struct pmu_event *pe,
			bool metric_no_group,
			int runtime,
			struct metric **mp,
			struct expr_id *parent,
			struct expr_ids *ids)
{
	struct metric_ref_node *ref;
	struct metric *m;

	if (*mp == NULL) {
		/*
		 * We got in here for the parent group,
		 * allocate it and put it on the list.
		 */
		m = zalloc(sizeof(*m));
		if (!m)
			return -ENOMEM;

		expr__ctx_init(&m->pctx);
		m->metric_name = pe->metric_name;
		m->metric_expr = pe->metric_expr;
		m->metric_unit = pe->unit;
		m->runtime = runtime;
		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
		INIT_LIST_HEAD(&m->metric_refs);
		m->metric_refs_cnt = 0;
		*mp = m;

		parent = expr_ids__alloc(ids);
		if (!parent) {
			free(m);
			return -EINVAL;
		}

		parent->id = strdup(pe->metric_name);
		if (!parent->id) {
			free(m);
			return -ENOMEM;
		}
	} else {
		/*
		 * We got here for the referenced metric, via the
		 * recursive metricgroup__add_metric call, add
		 * it to the parent group.
		 */
		m = *mp;

		ref = malloc(sizeof(*ref));
		if (!ref)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from 'pe' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		ref->metric_name = pe->metric_name;
		ref->metric_expr = pe->metric_expr;

		list_add(&ref->list, &m->metric_refs);
		m->metric_refs_cnt++;
	}

	/* Force all found IDs in metric to have us as parent ID. */
	WARN_ON_ONCE(!parent);
	m->pctx.parent = parent;

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add it to the parent context.
	 *
	 * NOTE(review): on parse failure this frees 'm' even in the
	 * referenced-metric case where m == *mp was allocated by an
	 * earlier call, leaving the caller's pointer dangling; the
	 * earlier free(m) paths also leave *mp pointing at freed
	 * memory - confirm against later upstream fixes.
	 */
	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
		expr__ctx_clear(&m->pctx);
		free(m);
		return -EINVAL;
	}

	/*
	 * We add new group only in the 'parent' call,
	 * so bail out for referenced metric case.
	 */
	if (m->metric_refs_cnt)
		return 0;

	if (list_empty(metric_list))
		list_add(&m->nd, metric_list);
	else {
		struct list_head *pos;

		/* Place the largest groups at the front. */
		list_for_each_prev(pos, metric_list) {
			struct metric *old = list_entry(pos, struct metric, nd);

			if (hashmap__size(&m->pctx.ids) <=
			    hashmap__size(&old->pctx.ids))
				break;
		}
		list_add(&m->nd, pos);
	}

	return 0;
}
747 
/* Iterate all entries of @__map's table until the all-empty sentinel. */
#define map_for_each_event(__pe, __idx, __map)				\
	for (__idx = 0, __pe = &__map->table[__idx];			\
	     __pe->name || __pe->metric_group || __pe->metric_name;	\
	     __pe = &__map->table[++__idx])

/* As above, but only metric entries whose group or name matches @__metric. */
#define map_for_each_metric(__pe, __idx, __map, __metric)		\
	map_for_each_event(__pe, __idx, __map)				\
		if (__pe->metric_expr &&				\
		    (match_metric(__pe->metric_group, __metric) ||	\
		     match_metric(__pe->metric_name, __metric)))
758 
759 static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
760 {
761 	struct pmu_event *pe;
762 	int i;
763 
764 	map_for_each_event(pe, i, map) {
765 		if (match_metric(pe->metric_name, metric))
766 			return pe;
767 	}
768 
769 	return NULL;
770 }
771 
/*
 * Guard against metrics referencing themselves (directly or transitively).
 * On success, allocates a fresh expr_id for @id chained to its parent and
 * returns it via @parent.
 */
static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
			   struct expr_ids *ids)
{
	struct expr_id_data *data;
	struct expr_id *p;
	int ret;

	/*
	 * We get the parent referenced by 'id' argument and
	 * traverse through all the parent object IDs to check
	 * if we already processed 'id', if we did, it's recursion
	 * and we fail.
	 */
	ret = expr__get_id(&m->pctx, id, &data);
	if (ret)
		return ret;

	p = data->parent;

	while (p->parent) {
		if (!strcmp(p->id, id)) {
			pr_err("failed: recursion detected for %s\n", id);
			return -1;
		}
		p = p->parent;
	}

	/*
	 * If we are over the limit of static entries, the metric
	 * is too difficult/nested to process, fail as well.
	 */
	p = expr_ids__alloc(ids);
	if (!p) {
		pr_err("failed: too many nested metrics\n");
		return -EINVAL;
	}

	p->id     = strdup(id);
	p->parent = data->parent;
	*parent   = p;

	return p->id ? 0 : -ENOMEM;
}
815 
816 static int add_metric(struct list_head *metric_list,
817 		      struct pmu_event *pe,
818 		      bool metric_no_group,
819 		      struct metric **mp,
820 		      struct expr_id *parent,
821 		      struct expr_ids *ids);
822 
/*
 * Replace each ID in m's context that actually names another metric with
 * that metric's events. Because add_metric() mutates the hashmap being
 * iterated, the walk restarts from scratch after every substitution and
 * only finishes when a full pass finds no more metric IDs.
 */
static int __resolve_metric(struct metric *m,
			    bool metric_no_group,
			    struct list_head *metric_list,
			    struct pmu_events_map *map,
			    struct expr_ids *ids)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool all;
	int ret;

	/*
	 * Iterate all the parsed IDs and if there's metric,
	 * add it to the context.
	 */
	do {
		all = true;
		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
			struct expr_id *parent;
			struct pmu_event *pe;

			pe = find_metric(cur->key, map);
			if (!pe)
				continue;

			ret = recursion_check(m, cur->key, &parent, ids);
			if (ret)
				return ret;

			all = false;
			/* The metric key itself needs to go out.. */
			expr__del_id(&m->pctx, cur->key);

			/* ... and it gets resolved to the parent context. */
			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
			if (ret)
				return ret;

			/*
			 * We added new metric to hashmap, so we need
			 * to break the iteration and start over.
			 */
			break;
		}
	} while (!all);

	return 0;
}
871 
872 static int resolve_metric(bool metric_no_group,
873 			  struct list_head *metric_list,
874 			  struct pmu_events_map *map,
875 			  struct expr_ids *ids)
876 {
877 	struct metric *m;
878 	int err;
879 
880 	list_for_each_entry(m, metric_list, nd) {
881 		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
882 		if (err)
883 			return err;
884 	}
885 	return 0;
886 }
887 
888 static int add_metric(struct list_head *metric_list,
889 		      struct pmu_event *pe,
890 		      bool metric_no_group,
891 		      struct metric **m,
892 		      struct expr_id *parent,
893 		      struct expr_ids *ids)
894 {
895 	struct metric *orig = *m;
896 	int ret = 0;
897 
898 	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
899 
900 	if (!strstr(pe->metric_expr, "?")) {
901 		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
902 	} else {
903 		int j, count;
904 
905 		count = arch_get_runtimeparam();
906 
907 		/* This loop is added to create multiple
908 		 * events depend on count value and add
909 		 * those events to metric_list.
910 		 */
911 
912 		for (j = 0; j < count && !ret; j++, *m = orig)
913 			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
914 	}
915 
916 	return ret;
917 }
918 
919 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
920 				   struct strbuf *events,
921 				   struct list_head *metric_list,
922 				   struct pmu_events_map *map)
923 {
924 	struct expr_ids ids = { .cnt = 0, };
925 	struct pmu_event *pe;
926 	struct metric *m;
927 	LIST_HEAD(list);
928 	int i, ret;
929 	bool has_match = false;
930 
931 	map_for_each_metric(pe, i, map, metric) {
932 		has_match = true;
933 		m = NULL;
934 
935 		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
936 		if (ret)
937 			return ret;
938 
939 		/*
940 		 * Process any possible referenced metrics
941 		 * included in the expression.
942 		 */
943 		ret = resolve_metric(metric_no_group,
944 				     &list, map, &ids);
945 		if (ret)
946 			return ret;
947 	}
948 
949 	/* End of pmu events. */
950 	if (!has_match)
951 		return -EINVAL;
952 
953 	list_for_each_entry(m, &list, nd) {
954 		if (events->len > 0)
955 			strbuf_addf(events, ",");
956 
957 		if (m->has_constraint) {
958 			metricgroup__add_metric_non_group(events,
959 							  &m->pctx);
960 		} else {
961 			metricgroup__add_metric_weak_group(events,
962 							   &m->pctx);
963 		}
964 	}
965 
966 	list_splice(&list, metric_list);
967 	expr_ids__exit(&ids);
968 	return 0;
969 }
970 
/*
 * Parse @list, a comma-separated set of metric/group names, adding each
 * via metricgroup__add_metric(). Initializes @events and appends every
 * metric's event string to it.
 */
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
					struct strbuf *events,
					struct list_head *metric_list,
					struct pmu_events_map *map)
{
	char *dup, *rest, *tok;
	int ret = -EINVAL;

	dup = strdup(list);
	if (!dup)
		return -ENOMEM;
	rest = dup;

	strbuf_init(events, 100);
	strbuf_addf(events, "%s", "");

	while ((tok = strsep(&rest, ",")) != NULL) {
		ret = metricgroup__add_metric(tok, metric_no_group, events,
					      metric_list, map);
		if (ret == -EINVAL) {
			fprintf(stderr, "Cannot find metric or group `%s'\n",
					tok);
			break;
		}
	}
	free(dup);

	if (!ret)
		metricgroup___watchdog_constraint_hint(NULL, true);

	return ret;
}
1003 
1004 static void metric__free_refs(struct metric *metric)
1005 {
1006 	struct metric_ref_node *ref, *tmp;
1007 
1008 	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
1009 		list_del(&ref->list);
1010 		free(ref);
1011 	}
1012 }
1013 
1014 static void metricgroup__free_metrics(struct list_head *metric_list)
1015 {
1016 	struct metric *m, *tmp;
1017 
1018 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1019 		metric__free_refs(m);
1020 		expr__ctx_clear(&m->pctx);
1021 		list_del_init(&m->nd);
1022 		free(m);
1023 	}
1024 }
1025 
1026 static int parse_groups(struct evlist *perf_evlist, const char *str,
1027 			bool metric_no_group,
1028 			bool metric_no_merge,
1029 			struct perf_pmu *fake_pmu,
1030 			struct rblist *metric_events,
1031 			struct pmu_events_map *map)
1032 {
1033 	struct parse_events_error parse_error;
1034 	struct strbuf extra_events;
1035 	LIST_HEAD(metric_list);
1036 	int ret;
1037 
1038 	if (metric_events->nr_entries == 0)
1039 		metricgroup__rblist_init(metric_events);
1040 	ret = metricgroup__add_metric_list(str, metric_no_group,
1041 					   &extra_events, &metric_list, map);
1042 	if (ret)
1043 		return ret;
1044 	pr_debug("adding %s\n", extra_events.buf);
1045 	bzero(&parse_error, sizeof(parse_error));
1046 	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
1047 	if (ret) {
1048 		parse_events_print_error(&parse_error, extra_events.buf);
1049 		goto out;
1050 	}
1051 	strbuf_release(&extra_events);
1052 	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
1053 					perf_evlist, metric_events);
1054 out:
1055 	metricgroup__free_metrics(&metric_list);
1056 	return ret;
1057 }
1058 
1059 int metricgroup__parse_groups(const struct option *opt,
1060 			      const char *str,
1061 			      bool metric_no_group,
1062 			      bool metric_no_merge,
1063 			      struct rblist *metric_events)
1064 {
1065 	struct evlist *perf_evlist = *(struct evlist **)opt->value;
1066 	struct pmu_events_map *map = perf_pmu__find_map(NULL);
1067 
1068 	if (!map)
1069 		return 0;
1070 
1071 	return parse_groups(perf_evlist, str, metric_no_group,
1072 			    metric_no_merge, NULL, metric_events, map);
1073 }
1074 
1075 int metricgroup__parse_groups_test(struct evlist *evlist,
1076 				   struct pmu_events_map *map,
1077 				   const char *str,
1078 				   bool metric_no_group,
1079 				   bool metric_no_merge,
1080 				   struct rblist *metric_events)
1081 {
1082 	return parse_groups(evlist, str, metric_no_group,
1083 			    metric_no_merge, &perf_pmu__fake, metric_events, map);
1084 }
1085 
1086 bool metricgroup__has_metric(const char *metric)
1087 {
1088 	struct pmu_events_map *map = perf_pmu__find_map(NULL);
1089 	struct pmu_event *pe;
1090 	int i;
1091 
1092 	if (!map)
1093 		return false;
1094 
1095 	for (i = 0; ; i++) {
1096 		pe = &map->table[i];
1097 
1098 		if (!pe->name && !pe->metric_group && !pe->metric_name)
1099 			break;
1100 		if (!pe->metric_expr)
1101 			continue;
1102 		if (match_metric(pe->metric_name, metric))
1103 			return true;
1104 	}
1105 	return false;
1106 }
1107