xref: /openbmc/linux/tools/perf/util/metricgroup.c (revision 66c98360)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmu-hybrid.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "expr.h"
18 #include "rblist.h"
19 #include <string.h>
20 #include <errno.h>
21 #include "strlist.h"
22 #include <assert.h>
23 #include <linux/ctype.h>
24 #include <linux/list_sort.h>
25 #include <linux/string.h>
26 #include <linux/zalloc.h>
27 #include <perf/cpumap.h>
28 #include <subcmd/parse-options.h>
29 #include <api/fs/fs.h>
30 #include "util.h"
31 #include <asm/bug.h>
32 #include "cgroup.h"
33 #include "util/hashmap.h"
34 
35 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
36 					 struct evsel *evsel,
37 					 bool create)
38 {
39 	struct rb_node *nd;
40 	struct metric_event me = {
41 		.evsel = evsel
42 	};
43 
44 	if (!metric_events)
45 		return NULL;
46 
47 	nd = rblist__find(metric_events, &me);
48 	if (nd)
49 		return container_of(nd, struct metric_event, nd);
50 	if (create) {
51 		rblist__add_node(metric_events, &me);
52 		nd = rblist__find(metric_events, &me);
53 		if (nd)
54 			return container_of(nd, struct metric_event, nd);
55 	}
56 	return NULL;
57 }
58 
59 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
60 {
61 	struct metric_event *a = container_of(rb_node,
62 					      struct metric_event,
63 					      nd);
64 	const struct metric_event *b = entry;
65 
66 	if (a->evsel == b->evsel)
67 		return 0;
68 	if ((char *)a->evsel < (char *)b->evsel)
69 		return -1;
70 	return +1;
71 }
72 
73 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
74 					const void *entry)
75 {
76 	struct metric_event *me = malloc(sizeof(struct metric_event));
77 
78 	if (!me)
79 		return NULL;
80 	memcpy(me, entry, sizeof(struct metric_event));
81 	me->evsel = ((struct metric_event *)entry)->evsel;
82 	INIT_LIST_HEAD(&me->head);
83 	return &me->nd;
84 }
85 
86 static void metric_event_delete(struct rblist *rblist __maybe_unused,
87 				struct rb_node *rb_node)
88 {
89 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
90 	struct metric_expr *expr, *tmp;
91 
92 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
93 		zfree(&expr->metric_name);
94 		zfree(&expr->metric_refs);
95 		zfree(&expr->metric_events);
96 		free(expr);
97 	}
98 
99 	free(me);
100 }
101 
102 static void metricgroup__rblist_init(struct rblist *metric_events)
103 {
104 	rblist__init(metric_events);
105 	metric_events->node_cmp = metric_event_cmp;
106 	metric_events->node_new = metric_event_new;
107 	metric_events->node_delete = metric_event_delete;
108 }
109 
110 void metricgroup__rblist_exit(struct rblist *metric_events)
111 {
112 	rblist__exit(metric_events);
113 }
114 
115 /**
116  * The metric under construction. The data held here will be placed in a
117  * metric_expr.
118  */
119 struct metric {
120 	struct list_head nd;
121 	/**
122 	 * The expression parse context, which importantly holds the IDs
123 	 * contained within the expression.
124 	 */
125 	struct expr_parse_ctx *pctx;
126 	/** The name of the metric such as "IPC". */
127 	const char *metric_name;
128 	/** Modifier on the metric such as "u" or NULL for none. */
129 	const char *modifier;
130 	/** The expression to parse, for example, "instructions/cycles". */
131 	const char *metric_expr;
132 	/** Optional threshold expression where zero value is green, otherwise red. */
133 	const char *metric_threshold;
134 	/**
135 	 * The "ScaleUnit" that scales and adds a unit to the metric during
136 	 * output.
137 	 */
138 	const char *metric_unit;
139 	/** Optional null terminated array of referenced metrics. */
140 	struct metric_ref *metric_refs;
141 	/**
142 	 * Should events of the metric be grouped?
143 	 */
144 	bool group_events;
145 	/**
146 	 * Parsed events for the metric. Optional as events may be taken from a
147 	 * different metric whose group contains all the IDs necessary for this
148 	 * one.
149 	 */
150 	struct evlist *evlist;
151 };
152 
153 static void metric__watchdog_constraint_hint(const char *name, bool foot)
154 {
155 	static bool violate_nmi_constraint;
156 
157 	if (!foot) {
158 		pr_warning("Not grouping metric %s's events.\n", name);
159 		violate_nmi_constraint = true;
160 		return;
161 	}
162 
163 	if (!violate_nmi_constraint)
164 		return;
165 
166 	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
167 		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
168 		   "    perf stat ...\n"
169 		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
170 }
171 
172 static bool metric__group_events(const struct pmu_metric *pm)
173 {
174 	switch (pm->event_grouping) {
175 	case MetricNoGroupEvents:
176 		return false;
177 	case MetricNoGroupEventsNmi:
178 		if (!sysctl__nmi_watchdog_enabled())
179 			return true;
180 		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
181 		return false;
182 	case MetricNoGroupEventsSmt:
183 		return !smt_on();
184 	case MetricGroupEvents:
185 	default:
186 		return true;
187 	}
188 }
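/*
 * Background note (general rationale, not specific to any one PMU): when the
 * NMI watchdog is enabled it typically occupies a hardware counter, so a
 * fully grouped MetricNoGroupEventsNmi metric may fail to schedule. Such
 * metrics are therefore left ungrouped and the hint printed by
 * metric__watchdog_constraint_hint() tells the user how to disable the
 * watchdog.
 */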
189 
190 static void metric__free(struct metric *m)
191 {
192 	if (!m)
193 		return;
194 
195 	zfree(&m->metric_refs);
196 	expr__ctx_free(m->pctx);
197 	zfree(&m->modifier);
198 	evlist__delete(m->evlist);
199 	free(m);
200 }
201 
202 static struct metric *metric__new(const struct pmu_metric *pm,
203 				  const char *modifier,
204 				  bool metric_no_group,
205 				  int runtime,
206 				  const char *user_requested_cpu_list,
207 				  bool system_wide)
208 {
209 	struct metric *m;
210 
211 	m = zalloc(sizeof(*m));
212 	if (!m)
213 		return NULL;
214 
215 	m->pctx = expr__ctx_new();
216 	if (!m->pctx)
217 		goto out_err;
218 
219 	m->metric_name = pm->metric_name;
220 	m->modifier = NULL;
221 	if (modifier) {
222 		m->modifier = strdup(modifier);
223 		if (!m->modifier)
224 			goto out_err;
225 	}
226 	m->metric_expr = pm->metric_expr;
227 	m->metric_threshold = pm->metric_threshold;
228 	m->metric_unit = pm->unit;
229 	m->pctx->sctx.user_requested_cpu_list = NULL;
230 	if (user_requested_cpu_list) {
231 		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
232 		if (!m->pctx->sctx.user_requested_cpu_list)
233 			goto out_err;
234 	}
235 	m->pctx->sctx.runtime = runtime;
236 	m->pctx->sctx.system_wide = system_wide;
237 	m->group_events = !metric_no_group && metric__group_events(pm);
238 	m->metric_refs = NULL;
239 	m->evlist = NULL;
240 
241 	return m;
242 out_err:
243 	metric__free(m);
244 	return NULL;
245 }
246 
247 static bool contains_metric_id(struct evsel **metric_events, int num_events,
248 			       const char *metric_id)
249 {
250 	int i;
251 
252 	for (i = 0; i < num_events; i++) {
253 		if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
254 			return true;
255 	}
256 	return false;
257 }
258 
259 /**
260  * setup_metric_events - Find a group of events in metric_evlist that correspond
261  *                       to the IDs from a parsed metric expression.
262  * @ids: the metric IDs to match.
263  * @metric_evlist: the list of perf events.
264  * @out_metric_events: holds the created metric events array.
265  */
266 static int setup_metric_events(struct hashmap *ids,
267 			       struct evlist *metric_evlist,
268 			       struct evsel ***out_metric_events)
269 {
270 	struct evsel **metric_events;
271 	const char *metric_id;
272 	struct evsel *ev;
273 	size_t ids_size, matched_events, i;
274 
275 	*out_metric_events = NULL;
276 	ids_size = hashmap__size(ids);
277 
278 	metric_events = calloc(ids_size + 1, sizeof(void *));
279 	if (!metric_events)
280 		return -ENOMEM;
281 
282 	matched_events = 0;
283 	evlist__for_each_entry(metric_evlist, ev) {
284 		struct expr_id_data *val_ptr;
285 
286 		/*
287 		 * Check for duplicate events with the same name. For
288 		 * example, uncore_imc/cas_count_read/ will turn into 6
289 		 * events per socket on skylakex. Only the first such
290 		 * event is placed in metric_events.
291 		 */
292 		metric_id = evsel__metric_id(ev);
293 		if (contains_metric_id(metric_events, matched_events, metric_id))
294 			continue;
295 		/*
296 		 * Does this event belong to the parse context? For
297 		 * combined or shared groups, this metric may not care
298 		 * about this event.
299 		 */
300 		if (hashmap__find(ids, metric_id, &val_ptr)) {
301 			metric_events[matched_events++] = ev;
302 
303 			if (matched_events >= ids_size)
304 				break;
305 		}
306 	}
307 	if (matched_events < ids_size) {
308 		free(metric_events);
309 		return -EINVAL;
310 	}
311 	for (i = 0; i < ids_size; i++) {
312 		ev = metric_events[i];
313 		ev->collect_stat = true;
314 
315 		/*
316 		 * The metric leader points to the identically named
317 		 * event in metric_events.
318 		 */
319 		ev->metric_leader = ev;
320 		/*
321 		 * Mark two events with identical names in the same
322 		 * group (or globally) as being in use, as uncore events
323 		 * may be duplicated for each PMU. Set the metric leader
324 		 * of such events to be the event that appears in
325 		 * metric_events.
326 		 */
327 		metric_id = evsel__metric_id(ev);
328 		evlist__for_each_entry_continue(metric_evlist, ev) {
329 			if (!strcmp(evsel__metric_id(ev), metric_id))
330 				ev->metric_leader = metric_events[i];
331 		}
332 	}
333 	*out_metric_events = metric_events;
334 	return 0;
335 }
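/*
 * Illustrative example (event names as in the comment above): with
 * uncore_imc/cas_count_read/ expanded into multiple events, one per IMC PMU
 * instance, only the first such evsel lands in metric_events[], and every
 * duplicate gets its metric_leader pointed at that first evsel so later
 * metric computation can treat them as one logical event.
 */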
336 
337 static bool match_metric(const char *n, const char *list)
338 {
339 	int len;
340 	char *m;
341 
342 	if (!list)
343 		return false;
344 	if (!strcmp(list, "all"))
345 		return true;
346 	if (!n)
347 		return !strcasecmp(list, "No_group");
348 	len = strlen(list);
349 	m = strcasestr(n, list);
350 	if (!m)
351 		return false;
352 	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
353 	    (m[len] == 0 || m[len] == ';'))
354 		return true;
355 	return false;
356 }
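/*
 * Illustrative examples (hypothetical strings): with n = "TopdownL1;Default",
 * a list of "topdownl1" matches (case-insensitive and bounded by ';' or the
 * end of string), but "Topdown" does not as the match is followed by 'L'
 * rather than a terminator. A list of "all" matches any metric, and a NULL n
 * only matches the special "No_group" name.
 */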
357 
358 static bool match_pm_metric(const struct pmu_metric *pm, const char *metric)
359 {
360 	return match_metric(pm->metric_group, metric) ||
361 	       match_metric(pm->metric_name, metric);
362 }
363 
364 /** struct mep - RB-tree node for building printing information. */
365 struct mep {
366 	/** @nd: RB-tree element. */
367 	struct rb_node nd;
368 	/** @metric_group: Owned metric group name, separated from others by ';'. */
369 	char *metric_group;
370 	const char *metric_name;
371 	const char *metric_desc;
372 	const char *metric_long_desc;
373 	const char *metric_expr;
374 	const char *metric_threshold;
375 	const char *metric_unit;
376 };
377 
378 static int mep_cmp(struct rb_node *rb_node, const void *entry)
379 {
380 	struct mep *a = container_of(rb_node, struct mep, nd);
381 	struct mep *b = (struct mep *)entry;
382 	int ret;
383 
384 	ret = strcmp(a->metric_group, b->metric_group);
385 	if (ret)
386 		return ret;
387 
388 	return strcmp(a->metric_name, b->metric_name);
389 }
390 
391 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
392 {
393 	struct mep *me = malloc(sizeof(struct mep));
394 
395 	if (!me)
396 		return NULL;
397 
398 	memcpy(me, entry, sizeof(struct mep));
399 	return &me->nd;
400 }
401 
402 static void mep_delete(struct rblist *rl __maybe_unused,
403 		       struct rb_node *nd)
404 {
405 	struct mep *me = container_of(nd, struct mep, nd);
406 
407 	zfree(&me->metric_group);
408 	free(me);
409 }
410 
411 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
412 			      const char *metric_name)
413 {
414 	struct rb_node *nd;
415 	struct mep me = {
416 		.metric_group = strdup(metric_group),
417 		.metric_name = metric_name,
418 	};
419 	nd = rblist__find(groups, &me);
420 	if (nd) {
421 		free(me.metric_group);
422 		return container_of(nd, struct mep, nd);
423 	}
424 	rblist__add_node(groups, &me);
425 	nd = rblist__find(groups, &me);
426 	if (nd)
427 		return container_of(nd, struct mep, nd);
428 	return NULL;
429 }
430 
431 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
432 					struct rblist *groups)
433 {
434 	const char *g;
435 	char *omg, *mg;
436 
437 	mg = strdup(pm->metric_group ?: "No_group");
438 	if (!mg)
439 		return -ENOMEM;
440 	omg = mg;
441 	while ((g = strsep(&mg, ";")) != NULL) {
442 		struct mep *me;
443 
444 		g = skip_spaces(g);
445 		if (strlen(g))
446 			me = mep_lookup(groups, g, pm->metric_name);
447 		else
448 			me = mep_lookup(groups, "No_group", pm->metric_name);
449 
450 		if (me) {
451 			me->metric_desc = pm->desc;
452 			me->metric_long_desc = pm->long_desc;
453 			me->metric_expr = pm->metric_expr;
454 			me->metric_threshold = pm->metric_threshold;
455 			me->metric_unit = pm->unit;
456 		}
457 	}
458 	free(omg);
459 
460 	return 0;
461 }
462 
463 struct metricgroup_iter_data {
464 	pmu_metric_iter_fn fn;
465 	void *data;
466 };
467 
468 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
469 				       const struct pmu_metrics_table *table,
470 				       void *data)
471 {
472 	struct metricgroup_iter_data *d = data;
473 	struct perf_pmu *pmu = NULL;
474 
475 	if (!pm->metric_expr || !pm->compat)
476 		return 0;
477 
478 	while ((pmu = perf_pmu__scan(pmu))) {
479 
480 		if (!pmu->id || strcmp(pmu->id, pm->compat))
481 			continue;
482 
483 		return d->fn(pm, table, d->data);
484 	}
485 	return 0;
486 }
487 
488 static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
489 					const struct pmu_metrics_table *table __maybe_unused,
490 					void *vdata)
491 {
492 	struct rblist *groups = vdata;
493 
494 	return metricgroup__add_to_mep_groups(pm, groups);
495 }
496 
497 void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
498 {
499 	struct rblist groups;
500 	const struct pmu_metrics_table *table;
501 	struct rb_node *node, *next;
502 
503 	rblist__init(&groups);
504 	groups.node_new = mep_new;
505 	groups.node_cmp = mep_cmp;
506 	groups.node_delete = mep_delete;
507 	table = pmu_metrics_table__find();
508 	if (table) {
509 		pmu_metrics_table_for_each_metric(table,
510 						 metricgroup__add_to_mep_groups_callback,
511 						 &groups);
512 	}
513 	{
514 		struct metricgroup_iter_data data = {
515 			.fn = metricgroup__add_to_mep_groups_callback,
516 			.data = &groups,
517 		};
518 		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
519 	}
520 
521 	for (node = rb_first_cached(&groups.entries); node; node = next) {
522 		struct mep *me = container_of(node, struct mep, nd);
523 
524 		print_cb->print_metric(print_state,
525 				me->metric_group,
526 				me->metric_name,
527 				me->metric_desc,
528 				me->metric_long_desc,
529 				me->metric_expr,
530 				me->metric_threshold,
531 				me->metric_unit);
532 		next = rb_next(node);
533 		rblist__remove_node(&groups, node);
534 	}
535 }
536 
537 static const char *code_characters = ",-=@";
538 
539 static int encode_metric_id(struct strbuf *sb, const char *x)
540 {
541 	char *c;
542 	int ret = 0;
543 
544 	for (; *x; x++) {
545 		c = strchr(code_characters, *x);
546 		if (c) {
547 			ret = strbuf_addch(sb, '!');
548 			if (ret)
549 				break;
550 
551 			ret = strbuf_addch(sb, '0' + (c - code_characters));
552 			if (ret)
553 				break;
554 		} else {
555 			ret = strbuf_addch(sb, *x);
556 			if (ret)
557 				break;
558 		}
559 	}
560 	return ret;
561 }
562 
563 static int decode_metric_id(struct strbuf *sb, const char *x)
564 {
565 	const char *orig = x;
566 	size_t i;
567 	char c;
568 	int ret;
569 
570 	for (; *x; x++) {
571 		c = *x;
572 		if (*x == '!') {
573 			x++;
574 			i = *x - '0';
575 			if (i >= strlen(code_characters)) {
576 				pr_err("Bad metric-id encoding in: '%s'\n", orig);
577 				return -1;
578 			}
579 			c = code_characters[i];
580 		}
581 		ret = strbuf_addch(sb, c);
582 		if (ret)
583 			return ret;
584 	}
585 	return 0;
586 }
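/*
 * Illustrative sketch of the encoding: each character in code_characters
 * (",-=@") is replaced by '!' plus its index, so ',' becomes "!0" and '@'
 * becomes "!3". For example, the ID "msr@tsc@" is encoded by
 * encode_metric_id() as "msr!3tsc!3" and restored here by decode_metric_id().
 */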
587 
588 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
589 {
590 	struct evsel *ev;
591 	struct strbuf sb = STRBUF_INIT;
592 	char *cur;
593 	int ret = 0;
594 
595 	evlist__for_each_entry(perf_evlist, ev) {
596 		if (!ev->metric_id)
597 			continue;
598 
599 		ret = strbuf_setlen(&sb, 0);
600 		if (ret)
601 			break;
602 
603 		ret = decode_metric_id(&sb, ev->metric_id);
604 		if (ret)
605 			break;
606 
607 		free((char *)ev->metric_id);
608 		ev->metric_id = strdup(sb.buf);
609 		if (!ev->metric_id) {
610 			ret = -ENOMEM;
611 			break;
612 		}
613 		/*
614 		 * If the name is just the parsed event, use the metric-id to
615 		 * give a more friendly display version.
616 		 */
617 		if (strstr(ev->name, "metric-id=")) {
618 			bool has_slash = false;
619 
620 			zfree(&ev->name);
621 			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
622 				*cur = '/';
623 				has_slash = true;
624 			}
625 
626 			if (modifier) {
627 				if (!has_slash && !strchr(sb.buf, ':')) {
628 					ret = strbuf_addch(&sb, ':');
629 					if (ret)
630 						break;
631 				}
632 				ret = strbuf_addstr(&sb, modifier);
633 				if (ret)
634 					break;
635 			}
636 			ev->name = strdup(sb.buf);
637 			if (!ev->name) {
638 				ret = -ENOMEM;
639 				break;
640 			}
641 		}
642 	}
643 	strbuf_release(&sb);
644 	return ret;
645 }
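/*
 * Illustrative example: an evsel whose metric-id decodes to "msr@tsc@" and
 * whose parsed name contains "metric-id=" would, under a "u" modifier, end up
 * renamed to roughly "msr/tsc/u"; the '@'s are rewritten to '/' and the
 * modifier is appended without an extra ':' because a '/' is already present.
 */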
646 
647 static int metricgroup__build_event_string(struct strbuf *events,
648 					   const struct expr_parse_ctx *ctx,
649 					   const char *modifier,
650 					   bool group_events)
651 {
652 	struct hashmap_entry *cur;
653 	size_t bkt;
654 	bool no_group = true, has_tool_events = false;
655 	bool tool_events[PERF_TOOL_MAX] = {false};
656 	int ret = 0;
657 
658 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
659 
660 	hashmap__for_each_entry(ctx->ids, cur, bkt) {
661 		const char *sep, *rsep, *id = cur->pkey;
662 		enum perf_tool_event ev;
663 
664 		pr_debug("found event %s\n", id);
665 
666 		/* Always move tool events outside of the group. */
667 		ev = perf_tool_event__from_str(id);
668 		if (ev != PERF_TOOL_NONE) {
669 			has_tool_events = true;
670 			tool_events[ev] = true;
671 			continue;
672 		}
673 		/* Separate events with commas and open the group if necessary. */
674 		if (no_group) {
675 			if (group_events) {
676 				ret = strbuf_addch(events, '{');
677 				RETURN_IF_NON_ZERO(ret);
678 			}
679 
680 			no_group = false;
681 		} else {
682 			ret = strbuf_addch(events, ',');
683 			RETURN_IF_NON_ZERO(ret);
684 		}
685 		/*
686 		 * Encode the ID as an event string. Add a metric-id qualifier
687 		 * holding the original name, except that characters
688 		 * parse-events can't parse are replaced. For example,
689 		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
690 		 */
691 		sep = strchr(id, '@');
692 		if (sep != NULL) {
693 			ret = strbuf_add(events, id, sep - id);
694 			RETURN_IF_NON_ZERO(ret);
695 			ret = strbuf_addch(events, '/');
696 			RETURN_IF_NON_ZERO(ret);
697 			rsep = strrchr(sep, '@');
698 			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
699 			RETURN_IF_NON_ZERO(ret);
700 			ret = strbuf_addstr(events, ",metric-id=");
701 			RETURN_IF_NON_ZERO(ret);
702 			sep = rsep;
703 		} else {
704 			sep = strchr(id, ':');
705 			if (sep != NULL) {
706 				ret = strbuf_add(events, id, sep - id);
707 				RETURN_IF_NON_ZERO(ret);
708 			} else {
709 				ret = strbuf_addstr(events, id);
710 				RETURN_IF_NON_ZERO(ret);
711 			}
712 			ret = strbuf_addstr(events, "/metric-id=");
713 			RETURN_IF_NON_ZERO(ret);
714 		}
715 		ret = encode_metric_id(events, id);
716 		RETURN_IF_NON_ZERO(ret);
717 		ret = strbuf_addstr(events, "/");
718 		RETURN_IF_NON_ZERO(ret);
719 
720 		if (sep != NULL) {
721 			ret = strbuf_addstr(events, sep + 1);
722 			RETURN_IF_NON_ZERO(ret);
723 		}
724 		if (modifier) {
725 			ret = strbuf_addstr(events, modifier);
726 			RETURN_IF_NON_ZERO(ret);
727 		}
728 	}
729 	if (!no_group && group_events) {
730 		ret = strbuf_addstr(events, "}:W");
731 		RETURN_IF_NON_ZERO(ret);
732 	}
733 	if (has_tool_events) {
734 		int i;
735 
736 		perf_tool_event__for_each_event(i) {
737 			if (tool_events[i]) {
738 				if (!no_group) {
739 					ret = strbuf_addch(events, ',');
740 					RETURN_IF_NON_ZERO(ret);
741 				}
742 				no_group = false;
743 				ret = strbuf_addstr(events, perf_tool_event__to_str(i));
744 				RETURN_IF_NON_ZERO(ret);
745 			}
746 		}
747 	}
748 
749 	return ret;
750 #undef RETURN_IF_NON_ZERO
751 }
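/*
 * Illustrative sketch of the output, assuming a metric whose IDs are
 * "instructions" and "cycles" (as in the IPC example earlier in this file),
 * no modifier and group_events true: the buffer would contain something like
 * "{instructions/metric-id=instructions/,cycles/metric-id=cycles/}:W", with
 * the event order depending on hashmap iteration and any tool events such as
 * duration_time appended after the closing "}:W".
 */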
752 
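/*
 * The number of times a metric containing '?' in its expression should be
 * instantiated; see add_metric() below, which adds such a metric once per
 * runtime value 0..count-1. Architectures may override this weak default
 * of 1.
 */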
753 int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
754 {
755 	return 1;
756 }
757 
758 /*
759  * A singly linked list on the stack of the names of metrics being
760  * processed. Used to identify recursion.
761  */
762 struct visited_metric {
763 	const char *name;
764 	const struct visited_metric *parent;
765 };
766 
767 struct metricgroup_add_iter_data {
768 	struct list_head *metric_list;
769 	const char *metric_name;
770 	const char *modifier;
771 	int *ret;
772 	bool *has_match;
773 	bool metric_no_group;
774 	bool metric_no_threshold;
775 	const char *user_requested_cpu_list;
776 	bool system_wide;
777 	struct metric *root_metric;
778 	const struct visited_metric *visited;
779 	const struct pmu_metrics_table *table;
780 };
781 
782 static bool metricgroup__find_metric(const char *metric,
783 				     const struct pmu_metrics_table *table,
784 				     struct pmu_metric *pm);
785 
786 static int add_metric(struct list_head *metric_list,
787 		      const struct pmu_metric *pm,
788 		      const char *modifier,
789 		      bool metric_no_group,
790 		      bool metric_no_threshold,
791 		      const char *user_requested_cpu_list,
792 		      bool system_wide,
793 		      struct metric *root_metric,
794 		      const struct visited_metric *visited,
795 		      const struct pmu_metrics_table *table);
796 
797 /**
798  * resolve_metric - Locate metrics within the root metric and recursively add
799  *                    references to them.
800  * @metric_list: The list the metric is added to.
801  * @modifier: If non-NULL, event modifiers such as "u".
802  * @metric_no_group: Should events written to the event string be grouped
803  *                   "{}" or be global. Grouping is the default but due to
804  *                   multiplexing the user may override.
805  * @user_requested_cpu_list: Command line specified CPUs to record on.
806  * @system_wide: Are events for all processes recorded.
807  * @root_metric: Metrics may reference other metrics to form a tree. In this
808  *               case the root_metric holds all the IDs and a list of referenced
809  *               metrics. When adding a root this argument is NULL.
810  * @visited: A singly linked list of metric names being added that is used to
811  *           detect recursion.
812  * @table: The table that is searched for metrics, most commonly the table for the
813  *       architecture perf is running upon.
814  */
815 static int resolve_metric(struct list_head *metric_list,
816 			  const char *modifier,
817 			  bool metric_no_group,
818 			  bool metric_no_threshold,
819 			  const char *user_requested_cpu_list,
820 			  bool system_wide,
821 			  struct metric *root_metric,
822 			  const struct visited_metric *visited,
823 			  const struct pmu_metrics_table *table)
824 {
825 	struct hashmap_entry *cur;
826 	size_t bkt;
827 	struct to_resolve {
828 		/* The metric to resolve. */
829 		struct pmu_metric pm;
830 		/*
831 		 * The key in the IDs map; this may differ in case,
832 		 * etc., from pm->metric_name.
833 		 */
834 		const char *key;
835 	} *pending = NULL;
836 	int i, ret = 0, pending_cnt = 0;
837 
838 	/*
839 	 * Iterate over all the parsed IDs and, if there's a matching metric,
840 	 * add it to the pending array.
841 	 */
842 	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
843 		struct pmu_metric pm;
844 
845 		if (metricgroup__find_metric(cur->pkey, table, &pm)) {
846 			pending = realloc(pending,
847 					(pending_cnt + 1) * sizeof(struct to_resolve));
848 			if (!pending)
849 				return -ENOMEM;
850 
851 			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
852 			pending[pending_cnt].key = cur->pkey;
853 			pending_cnt++;
854 		}
855 	}
856 
857 	/* Remove the metric IDs from the context. */
858 	for (i = 0; i < pending_cnt; i++)
859 		expr__del_id(root_metric->pctx, pending[i].key);
860 
861 	/*
862 	 * Recursively add all the metrics, IDs are added to the root metric's
863 	 * context.
864 	 */
865 	for (i = 0; i < pending_cnt; i++) {
866 		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
867 				 metric_no_threshold, user_requested_cpu_list, system_wide,
868 				 root_metric, visited, table);
869 		if (ret)
870 			break;
871 	}
872 
873 	free(pending);
874 	return ret;
875 }
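/*
 * Illustrative example with hypothetical metric names: if the root metric "A"
 * has the expression "b_metric / cycles" and "b_metric" is itself a metric in
 * the table, resolve_metric() removes the "b_metric" ID from the root's
 * context and add_metric() parses b_metric's own expression into that same
 * context, recording it in root_metric->metric_refs so evaluation can later
 * resolve the reference.
 */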
876 
877 /**
878  * __add_metric - Add a metric to metric_list.
879  * @metric_list: The list the metric is added to.
880  * @pm: The pmu_metric containing the metric to be added.
881  * @modifier: If non-NULL, event modifiers such as "u".
882  * @metric_no_group: Should events written to the event string be grouped
883  *                   "{}" or be global. Grouping is the default but due to
884  *                   multiplexing the user may override.
885  * @metric_no_threshold: Should threshold expressions be ignored?
886  * @runtime: A special argument for the parser only known at runtime.
887  * @user_requested_cpu_list: Command line specified CPUs to record on.
888  * @system_wide: Are events for all processes recorded.
889  * @root_metric: Metrics may reference other metrics to form a tree. In this
890  *               case the root_metric holds all the IDs and a list of referenced
891  *               metrics. When adding a root this argument is NULL.
892  * @visited: A singly linked list of metric names being added that is used to
893  *           detect recursion.
894  * @table: The table that is searched for metrics, most commonly the table for the
895  *       architecture perf is running upon.
896  */
897 static int __add_metric(struct list_head *metric_list,
898 			const struct pmu_metric *pm,
899 			const char *modifier,
900 			bool metric_no_group,
901 			bool metric_no_threshold,
902 			int runtime,
903 			const char *user_requested_cpu_list,
904 			bool system_wide,
905 			struct metric *root_metric,
906 			const struct visited_metric *visited,
907 			const struct pmu_metrics_table *table)
908 {
909 	const struct visited_metric *vm;
910 	int ret;
911 	bool is_root = !root_metric;
912 	const char *expr;
913 	struct visited_metric visited_node = {
914 		.name = pm->metric_name,
915 		.parent = visited,
916 	};
917 
918 	for (vm = visited; vm; vm = vm->parent) {
919 		if (!strcmp(pm->metric_name, vm->name)) {
920 			pr_err("failed: recursion detected for %s\n", pm->metric_name);
921 			return -1;
922 		}
923 	}
924 
925 	if (is_root) {
926 		/*
927 		 * This metric is the root of a tree and may reference other
928 		 * metrics that are added recursively.
929 		 */
930 		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
931 					  user_requested_cpu_list, system_wide);
932 		if (!root_metric)
933 			return -ENOMEM;
934 
935 	} else {
936 		int cnt = 0;
937 
938 		/*
939 		 * This metric was referenced in a metric higher in the
940 		 * tree. Check if the same metric is already resolved in the
941 		 * metric_refs list.
942 		 */
943 		if (root_metric->metric_refs) {
944 			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
945 				if (!strcmp(pm->metric_name,
946 					    root_metric->metric_refs[cnt].metric_name))
947 					return 0;
948 			}
949 		}
950 
951 		/* Create reference. Need space for the entry and the terminator. */
952 		root_metric->metric_refs = realloc(root_metric->metric_refs,
953 						(cnt + 2) * sizeof(struct metric_ref));
954 		if (!root_metric->metric_refs)
955 			return -ENOMEM;
956 
957 		/*
958 		 * Intentionally passing just const char pointers,
959 		 * from the 'pm' object, so they never go away. We don't
960 		 * need to change them, so there's no need to create
961 		 * our own copy.
962 		 */
963 		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
964 		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
965 
966 		/* Null terminate array. */
967 		root_metric->metric_refs[cnt+1].metric_name = NULL;
968 		root_metric->metric_refs[cnt+1].metric_expr = NULL;
969 	}
970 
971 	/*
972 	 * For both the parent and referenced metrics, we parse
973 	 * all the metric's IDs and add it to the root context.
974 	 */
975 	ret = 0;
976 	expr = pm->metric_expr;
977 	if (is_root && pm->metric_threshold) {
978 		/*
979 		 * Threshold expressions are built off the actual metric. Switch
980 		 * to use that in case of additional necessary events. Change
981 		 * the visited node name to avoid this being flagged as
982 		 * recursion. If the threshold events are disabled, just use the
983 		 * metric's name as a reference. This allows metric threshold
984 		 * computation if there are sufficient events.
985 		 */
986 		assert(strstr(pm->metric_threshold, pm->metric_name));
987 		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
988 		visited_node.name = "__threshold__";
989 	}
990 	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
991 		/* Broken metric. */
992 		ret = -EINVAL;
993 	}
994 	if (!ret) {
995 		/* Resolve referenced metrics. */
996 		ret = resolve_metric(metric_list, modifier, metric_no_group,
997 				     metric_no_threshold, user_requested_cpu_list,
998 				     system_wide, root_metric, &visited_node, table);
999 	}
1000 	if (ret) {
1001 		if (is_root)
1002 			metric__free(root_metric);
1003 
1004 	} else if (is_root)
1005 		list_add(&root_metric->nd, metric_list);
1006 
1007 	return ret;
1008 }
1009 
1010 struct metricgroup__find_metric_data {
1011 	const char *metric;
1012 	struct pmu_metric *pm;
1013 };
1014 
1015 static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1016 					     const struct pmu_metrics_table *table  __maybe_unused,
1017 					     void *vdata)
1018 {
1019 	struct metricgroup__find_metric_data *data = vdata;
1020 
1021 	if (!match_metric(pm->metric_name, data->metric))
1022 		return 0;
1023 
1024 	memcpy(data->pm, pm, sizeof(*pm));
1025 	return 1;
1026 }
1027 
1028 static bool metricgroup__find_metric(const char *metric,
1029 				     const struct pmu_metrics_table *table,
1030 				     struct pmu_metric *pm)
1031 {
1032 	struct metricgroup__find_metric_data data = {
1033 		.metric = metric,
1034 		.pm = pm,
1035 	};
1036 
1037 	return pmu_metrics_table_for_each_metric(table, metricgroup__find_metric_callback, &data)
1038 		? true : false;
1039 }
1040 
1041 static int add_metric(struct list_head *metric_list,
1042 		      const struct pmu_metric *pm,
1043 		      const char *modifier,
1044 		      bool metric_no_group,
1045 		      bool metric_no_threshold,
1046 		      const char *user_requested_cpu_list,
1047 		      bool system_wide,
1048 		      struct metric *root_metric,
1049 		      const struct visited_metric *visited,
1050 		      const struct pmu_metrics_table *table)
1051 {
1052 	int ret = 0;
1053 
1054 	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1055 
1056 	if (!strstr(pm->metric_expr, "?")) {
1057 		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1058 				   metric_no_threshold, 0, user_requested_cpu_list,
1059 				   system_wide, root_metric, visited, table);
1060 	} else {
1061 		int j, count;
1062 
1063 		count = arch_get_runtimeparam(pm);
1064 
1065 		/*
1066 		 * This loop adds the metric once per runtime parameter value
1067 		 * (count), adding the resulting events to metric_list.
1068 		 */
1069 
1070 		for (j = 0; j < count && !ret; j++)
1071 			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1072 					   metric_no_threshold, j, user_requested_cpu_list,
1073 					   system_wide, root_metric, visited, table);
1074 	}
1075 
1076 	return ret;
1077 }
1078 
1079 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1080 					const struct pmu_metrics_table *table __maybe_unused,
1081 					void *data)
1082 {
1083 	struct metricgroup_add_iter_data *d = data;
1084 	int ret;
1085 
1086 	if (!match_pm_metric(pm, d->metric_name))
1087 		return 0;
1088 
1089 	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1090 			 d->metric_no_threshold, d->user_requested_cpu_list,
1091 			 d->system_wide, d->root_metric, d->visited, d->table);
1092 	if (ret)
1093 		goto out;
1094 
1095 	*(d->has_match) = true;
1096 
1097 out:
1098 	*(d->ret) = ret;
1099 	return ret;
1100 }
1101 
1102 /**
1103  * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1104  *                   the front. Tool events are excluded from the count.
1105  */
1106 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1107 			   const struct list_head *r)
1108 {
1109 	const struct metric *left = container_of(l, struct metric, nd);
1110 	const struct metric *right = container_of(r, struct metric, nd);
1111 	struct expr_id_data *data;
1112 	int i, left_count, right_count;
1113 
1114 	left_count = hashmap__size(left->pctx->ids);
1115 	perf_tool_event__for_each_event(i) {
1116 		if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1117 			left_count--;
1118 	}
1119 
1120 	right_count = hashmap__size(right->pctx->ids);
1121 	perf_tool_event__for_each_event(i) {
1122 		if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1123 			right_count--;
1124 	}
1125 
1126 	return right_count - left_count;
1127 }
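/*
 * Sorting metrics with the most events first means that when evlists are
 * shared in parse_groups(), a later metric whose IDs are a subset of an
 * earlier metric's can reuse that earlier, larger evlist rather than parsing
 * its own.
 */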
1128 
1129 struct metricgroup__add_metric_data {
1130 	struct list_head *list;
1131 	const char *metric_name;
1132 	const char *modifier;
1133 	const char *user_requested_cpu_list;
1134 	bool metric_no_group;
1135 	bool metric_no_threshold;
1136 	bool system_wide;
1137 	bool has_match;
1138 };
1139 
1140 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1141 					    const struct pmu_metrics_table *table,
1142 					    void *vdata)
1143 {
1144 	struct metricgroup__add_metric_data *data = vdata;
1145 	int ret = 0;
1146 
1147 	if (pm->metric_expr &&
1148 		(match_metric(pm->metric_group, data->metric_name) ||
1149 		 match_metric(pm->metric_name, data->metric_name))) {
1150 
1151 		data->has_match = true;
1152 		ret = add_metric(data->list, pm, data->modifier, data->metric_no_group,
1153 				 data->metric_no_threshold, data->user_requested_cpu_list,
1154 				 data->system_wide, /*root_metric=*/NULL,
1155 				 /*visited_metrics=*/NULL, table);
1156 	}
1157 	return ret;
1158 }
1159 
1160 /**
1161  * metricgroup__add_metric - Find and add a metric, or a metric group.
1162  * @metric_name: The name of the metric or metric group. For example, "IPC"
1163  *               could be the name of a metric and "TopDownL1" the name of a
1164  *               metric group.
1165  * @modifier: If non-NULL, event modifiers such as "u".
1166  * @metric_no_group: Should events written to the event string be grouped
1167  *                   "{}" or be global. Grouping is the default but due to
1168  *                   multiplexing the user may override.
1169  * @user_requested_cpu_list: Command line specified CPUs to record on.
1170  * @system_wide: Are events for all processes recorded.
1171  * @metric_list: The list that the metric or metric group are added to.
1172  * @table: The table that is searched for metrics, most commonly the table for the
1173  *       architecture perf is running upon.
1174  */
1175 static int metricgroup__add_metric(const char *metric_name, const char *modifier,
1176 				   bool metric_no_group, bool metric_no_threshold,
1177 				   const char *user_requested_cpu_list,
1178 				   bool system_wide,
1179 				   struct list_head *metric_list,
1180 				   const struct pmu_metrics_table *table)
1181 {
1182 	LIST_HEAD(list);
1183 	int ret;
1184 	bool has_match = false;
1185 
1186 	{
1187 		struct metricgroup__add_metric_data data = {
1188 			.list = &list,
1189 			.metric_name = metric_name,
1190 			.modifier = modifier,
1191 			.metric_no_group = metric_no_group,
1192 			.metric_no_threshold = metric_no_threshold,
1193 			.user_requested_cpu_list = user_requested_cpu_list,
1194 			.system_wide = system_wide,
1195 			.has_match = false,
1196 		};
1197 		/*
1198 		 * Iterate over all metrics to see if one matches either the
1199 		 * name or group. When it does, add the metric to the list.
1200 		 */
1201 		ret = pmu_metrics_table_for_each_metric(table, metricgroup__add_metric_callback,
1202 						       &data);
1203 		if (ret)
1204 			goto out;
1205 
1206 		has_match = data.has_match;
1207 	}
1208 	{
1209 		struct metricgroup_iter_data data = {
1210 			.fn = metricgroup__add_metric_sys_event_iter,
1211 			.data = (void *) &(struct metricgroup_add_iter_data) {
1212 				.metric_list = &list,
1213 				.metric_name = metric_name,
1214 				.modifier = modifier,
1215 				.metric_no_group = metric_no_group,
1216 				.user_requested_cpu_list = user_requested_cpu_list,
1217 				.system_wide = system_wide,
1218 				.has_match = &has_match,
1219 				.ret = &ret,
1220 				.table = table,
1221 			},
1222 		};
1223 
1224 		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
1225 	}
1226 	/* End of pmu events. */
1227 	if (!has_match)
1228 		ret = -EINVAL;
1229 
1230 out:
1231 	/*
1232 	 * Add to metric_list so that the metrics can be released
1233 	 * even if parsing failed.
1234 	 */
1235 	list_splice(&list, metric_list);
1236 	return ret;
1237 }
1238 
1239 /**
1240  * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1241  *                                specified in a list.
1242  * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1243  *        would match the IPC and CPI metrics, and TopDownL1 would match all
1244  *        the metrics in the TopDownL1 group.
1245  * @metric_no_group: Should events written to the event string be grouped
1246  *                   "{}" or be global. Grouping is the default but due to
1247  *                   multiplexing the user may override.
1248  * @user_requested_cpu_list: Command line specified CPUs to record on.
1249  * @system_wide: Are events for all processes recorded.
1250  * @metric_list: The list that metrics are added to.
1251  * @table: The table that is searched for metrics, most commonly the table for the
1252  *       architecture perf is running upon.
1253  */
1254 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1255 					bool metric_no_threshold,
1256 					const char *user_requested_cpu_list,
1257 					bool system_wide, struct list_head *metric_list,
1258 					const struct pmu_metrics_table *table)
1259 {
1260 	char *list_itr, *list_copy, *metric_name, *modifier;
1261 	int ret, count = 0;
1262 
1263 	list_copy = strdup(list);
1264 	if (!list_copy)
1265 		return -ENOMEM;
1266 	list_itr = list_copy;
1267 
1268 	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1269 		modifier = strchr(metric_name, ':');
1270 		if (modifier)
1271 			*modifier++ = '\0';
1272 
1273 		ret = metricgroup__add_metric(metric_name, modifier,
1274 					      metric_no_group, metric_no_threshold,
1275 					      user_requested_cpu_list,
1276 					      system_wide, metric_list, table);
1277 		if (ret == -EINVAL)
1278 			pr_err("Cannot find metric or group `%s'\n", metric_name);
1279 
1280 		if (ret)
1281 			break;
1282 
1283 		count++;
1284 	}
1285 	free(list_copy);
1286 
1287 	if (!ret) {
1288 		/*
1289 		 * Warn about nmi_watchdog if any parsed metrics had the
1290 		 * NO_NMI_WATCHDOG constraint.
1291 		 */
1292 		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
1293 		/* No metrics. */
1294 		if (count == 0)
1295 			return -EINVAL;
1296 	}
1297 	return ret;
1298 }
1299 
1300 static void metricgroup__free_metrics(struct list_head *metric_list)
1301 {
1302 	struct metric *m, *tmp;
1303 
1304 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1305 		list_del_init(&m->nd);
1306 		metric__free(m);
1307 	}
1308 }
1309 
1310 /**
1311  * find_tool_events - Search for the presence of tool events in metric_list.
1312  * @metric_list: List to take metrics from.
1313  * @tool_events: Array of false values, indices corresponding to tool events set
1314  *               to true if tool event is found.
1315  */
1316 static void find_tool_events(const struct list_head *metric_list,
1317 			     bool tool_events[PERF_TOOL_MAX])
1318 {
1319 	struct metric *m;
1320 
1321 	list_for_each_entry(m, metric_list, nd) {
1322 		int i;
1323 
1324 		perf_tool_event__for_each_event(i) {
1325 			struct expr_id_data *data;
1326 
1327 			if (!tool_events[i] &&
1328 			    !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1329 				tool_events[i] = true;
1330 		}
1331 	}
1332 }
1333 
1334 /**
1335  * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1336  *                           metric IDs; as the IDs are held in a set,
1337  *                           duplicates will be removed.
1338  * @metric_list: List to take metrics from.
1339  * @combined: Out argument for result.
1340  */
1341 static int build_combined_expr_ctx(const struct list_head *metric_list,
1342 				   struct expr_parse_ctx **combined)
1343 {
1344 	struct hashmap_entry *cur;
1345 	size_t bkt;
1346 	struct metric *m;
1347 	char *dup;
1348 	int ret;
1349 
1350 	*combined = expr__ctx_new();
1351 	if (!*combined)
1352 		return -ENOMEM;
1353 
1354 	list_for_each_entry(m, metric_list, nd) {
1355 		if (!m->group_events && !m->modifier) {
1356 			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1357 				dup = strdup(cur->pkey);
1358 				if (!dup) {
1359 					ret = -ENOMEM;
1360 					goto err_out;
1361 				}
1362 				ret = expr__add_id(*combined, dup);
1363 				if (ret)
1364 					goto err_out;
1365 			}
1366 		}
1367 	}
1368 	return 0;
1369 err_out:
1370 	expr__ctx_free(*combined);
1371 	*combined = NULL;
1372 	return ret;
1373 }
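/*
 * Illustrative note: if two ungrouped, unmodified metrics both reference
 * "cycles", the combined context holds a single "cycles" ID, so
 * parse_groups() can parse one shared evlist for all such metrics instead of
 * one per metric.
 */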
1374 
1375 /**
1376  * parse_ids - Build the event string for the ids and parse them creating an
1377  *             evlist. The encoded metric_ids are decoded.
1378  * @metric_no_merge: is metric sharing explicitly disabled.
1379  * @fake_pmu: used when testing metrics not supported by the current CPU.
1380  * @ids: the event identifiers parsed from a metric.
1381  * @modifier: any modifiers added to the events.
1382  * @group_events: should events be placed in a weak group.
1383  * @tool_events: entries set true if the tool event of index could be present in
1384  *               the overall list of metrics.
1385  * @out_evlist: the created list of events.
1386  */
1387 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1388 		     struct expr_parse_ctx *ids, const char *modifier,
1389 		     bool group_events, const bool tool_events[PERF_TOOL_MAX],
1390 		     struct evlist **out_evlist)
1391 {
1392 	struct parse_events_error parse_error;
1393 	struct evlist *parsed_evlist;
1394 	struct strbuf events = STRBUF_INIT;
1395 	int ret;
1396 
1397 	*out_evlist = NULL;
1398 	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1399 		bool added_event = false;
1400 		int i;
1401 		/*
1402 		 * We may fail to share events between metrics because a tool
1403 		 * event isn't present in one metric. For example, a ratio of
1404 		 * cache misses doesn't need duration_time but the same events
1405 		 * may be used for a misses per second. Events without sharing
1406 		 * may be used for a misses per second. Not sharing events
1407 		 * implies multiplexing, which is best avoided, so place
1408 		 *
1409 		 * Also, there may be no ids/events in the expression parsing
1410 		 * context because of constant evaluation, e.g.:
1411 		 *    event1 if #smt_on else 0
1412 		 * Add a tool event to avoid a parse error on an empty string.
1413 		 */
1414 		perf_tool_event__for_each_event(i) {
1415 			if (tool_events[i]) {
1416 				char *tmp = strdup(perf_tool_event__to_str(i));
1417 
1418 				if (!tmp)
1419 					return -ENOMEM;
1420 				ids__insert(ids->ids, tmp);
1421 				added_event = true;
1422 			}
1423 		}
1424 		if (!added_event && hashmap__size(ids->ids) == 0) {
1425 			char *tmp = strdup("duration_time");
1426 
1427 			if (!tmp)
1428 				return -ENOMEM;
1429 			ids__insert(ids->ids, tmp);
1430 		}
1431 	}
1432 	ret = metricgroup__build_event_string(&events, ids, modifier,
1433 					      group_events);
1434 	if (ret)
1435 		return ret;
1436 
1437 	parsed_evlist = evlist__new();
1438 	if (!parsed_evlist) {
1439 		ret = -ENOMEM;
1440 		goto err_out;
1441 	}
1442 	pr_debug("Parsing metric events '%s'\n", events.buf);
1443 	parse_events_error__init(&parse_error);
1444 	ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu,
1445 			     /*warn_if_reordered=*/false);
1446 	if (ret) {
1447 		parse_events_error__print(&parse_error, events.buf);
1448 		goto err_out;
1449 	}
1450 	ret = decode_all_metric_ids(parsed_evlist, modifier);
1451 	if (ret)
1452 		goto err_out;
1453 
1454 	*out_evlist = parsed_evlist;
1455 	parsed_evlist = NULL;
1456 err_out:
1457 	parse_events_error__exit(&parse_error);
1458 	evlist__delete(parsed_evlist);
1459 	strbuf_release(&events);
1460 	return ret;
1461 }
1462 
1463 static int parse_groups(struct evlist *perf_evlist, const char *str,
1464 			bool metric_no_group,
1465 			bool metric_no_merge,
1466 			bool metric_no_threshold,
1467 			const char *user_requested_cpu_list,
1468 			bool system_wide,
1469 			struct perf_pmu *fake_pmu,
1470 			struct rblist *metric_events_list,
1471 			const struct pmu_metrics_table *table)
1472 {
1473 	struct evlist *combined_evlist = NULL;
1474 	LIST_HEAD(metric_list);
1475 	struct metric *m;
1476 	bool tool_events[PERF_TOOL_MAX] = {false};
1477 	int ret;
1478 
1479 	if (metric_events_list->nr_entries == 0)
1480 		metricgroup__rblist_init(metric_events_list);
1481 	ret = metricgroup__add_metric_list(str, metric_no_group, metric_no_threshold,
1482 					   user_requested_cpu_list,
1483 					   system_wide, &metric_list, table);
1484 	if (ret)
1485 		goto out;
1486 
1487 	/* Sort metrics from largest to smallest. */
1488 	list_sort(NULL, &metric_list, metric_list_cmp);
1489 
1490 	if (!metric_no_merge) {
1491 		struct expr_parse_ctx *combined = NULL;
1492 
1493 		find_tool_events(&metric_list, tool_events);
1494 
1495 		ret = build_combined_expr_ctx(&metric_list, &combined);
1496 
1497 		if (!ret && combined && hashmap__size(combined->ids)) {
1498 			ret = parse_ids(metric_no_merge, fake_pmu, combined,
1499 					/*modifier=*/NULL,
1500 					/*group_events=*/false,
1501 					tool_events,
1502 					&combined_evlist);
1503 		}
1504 		if (combined)
1505 			expr__ctx_free(combined);
1506 
1507 		if (ret)
1508 			goto out;
1509 	}
1510 
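	/*
	 * Create or reuse an evlist for each metric: the combined evlist for
	 * ungrouped metrics, an earlier metric's evlist when this metric's
	 * IDs are a subset of it, or a freshly parsed evlist otherwise. Then
	 * record a metric_expr for the metric against its first event.
	 */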
1511 	list_for_each_entry(m, &metric_list, nd) {
1512 		struct metric_event *me;
1513 		struct evsel **metric_events;
1514 		struct evlist *metric_evlist = NULL;
1515 		struct metric *n;
1516 		struct metric_expr *expr;
1517 
1518 		if (combined_evlist && !m->group_events) {
1519 			metric_evlist = combined_evlist;
1520 		} else if (!metric_no_merge) {
1521 			/*
1522 			 * See if the IDs for this metric are a subset of an
1523 			 * earlier metric.
1524 			 */
1525 			list_for_each_entry(n, &metric_list, nd) {
1526 				if (m == n)
1527 					break;
1528 
1529 				if (n->evlist == NULL)
1530 					continue;
1531 
1532 				if ((!m->modifier && n->modifier) ||
1533 				    (m->modifier && !n->modifier) ||
1534 				    (m->modifier && n->modifier &&
1535 					    strcmp(m->modifier, n->modifier)))
1536 					continue;
1537 
1538 				if (expr__subset_of_ids(n->pctx, m->pctx)) {
1539 					pr_debug("Events in '%s' fully contained within '%s'\n",
1540 						 m->metric_name, n->metric_name);
1541 					metric_evlist = n->evlist;
1542 					break;
1543 				}
1544 
1545 			}
1546 		}
1547 		if (!metric_evlist) {
1548 			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1549 					m->group_events, tool_events, &m->evlist);
1550 			if (ret)
1551 				goto out;
1552 
1553 			metric_evlist = m->evlist;
1554 		}
1555 		ret = setup_metric_events(m->pctx->ids, metric_evlist, &metric_events);
1556 		if (ret) {
1557 			pr_debug("Cannot resolve IDs for %s: %s\n",
1558 				m->metric_name, m->metric_expr);
1559 			goto out;
1560 		}
1561 
1562 		me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1563 
1564 		expr = malloc(sizeof(struct metric_expr));
1565 		if (!expr) {
1566 			ret = -ENOMEM;
1567 			free(metric_events);
1568 			goto out;
1569 		}
1570 
1571 		expr->metric_refs = m->metric_refs;
1572 		m->metric_refs = NULL;
1573 		expr->metric_expr = m->metric_expr;
1574 		if (m->modifier) {
1575 			char *tmp;
1576 
1577 			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1578 				expr->metric_name = NULL;
1579 			else
1580 				expr->metric_name = tmp;
1581 		} else
1582 			expr->metric_name = strdup(m->metric_name);
1583 
1584 		if (!expr->metric_name) {
1585 			ret = -ENOMEM;
1586 			free(metric_events);
1587 			goto out;
1588 		}
1589 		expr->metric_threshold = m->metric_threshold;
1590 		expr->metric_unit = m->metric_unit;
1591 		expr->metric_events = metric_events;
1592 		expr->runtime = m->pctx->sctx.runtime;
1593 		list_add(&expr->nd, &me->head);
1594 	}
1595 
1596 
1597 	if (combined_evlist) {
1598 		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1599 		evlist__delete(combined_evlist);
1600 	}
1601 
1602 	list_for_each_entry(m, &metric_list, nd) {
1603 		if (m->evlist)
1604 			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1605 	}
1606 
1607 out:
1608 	metricgroup__free_metrics(&metric_list);
1609 	return ret;
1610 }
1611 
1612 int metricgroup__parse_groups(struct evlist *perf_evlist,
1613 			      const char *str,
1614 			      bool metric_no_group,
1615 			      bool metric_no_merge,
1616 			      bool metric_no_threshold,
1617 			      const char *user_requested_cpu_list,
1618 			      bool system_wide,
1619 			      struct rblist *metric_events)
1620 {
1621 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1622 
1623 	if (!table)
1624 		return -EINVAL;
1625 
1626 	return parse_groups(perf_evlist, str, metric_no_group, metric_no_merge,
1627 			    metric_no_threshold, user_requested_cpu_list, system_wide,
1628 			    /*fake_pmu=*/NULL, metric_events, table);
1629 }
1630 
1631 int metricgroup__parse_groups_test(struct evlist *evlist,
1632 				   const struct pmu_metrics_table *table,
1633 				   const char *str,
1634 				   struct rblist *metric_events)
1635 {
1636 	return parse_groups(evlist, str,
1637 			    /*metric_no_group=*/false,
1638 			    /*metric_no_merge=*/false,
1639 			    /*metric_no_threshold=*/false,
1640 			    /*user_requested_cpu_list=*/NULL,
1641 			    /*system_wide=*/false,
1642 			    &perf_pmu__fake, metric_events, table);
1643 }
1644 
1645 static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1646 					    const struct pmu_metrics_table *table __maybe_unused,
1647 					    void *vdata)
1648 {
1649 	const char *metric = vdata;
1650 
1651 	if (match_metric(pm->metric_name, metric) ||
1652 	    match_metric(pm->metric_group, metric))
1653 		return 1;
1654 
1655 	return 0;
1656 }
1657 
1658 bool metricgroup__has_metric(const char *metric)
1659 {
1660 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1661 
1662 	if (!table)
1663 		return false;
1664 
1665 	return pmu_metrics_table_for_each_metric(table, metricgroup__has_metric_callback,
1666 						(void *)metric) ? true : false;
1667 }
1668 
1669 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1670 					    const struct pmu_metrics_table *table __maybe_unused,
1671 					    void *data)
1672 {
1673 	unsigned int *max_level = data;
1674 	unsigned int level;
1675 	const char *p = strstr(pm->metric_group, "TopdownL");
1676 
1677 	if (!p || p[8] == '\0')
1678 		return 0;
1679 
1680 	level = p[8] - '0';
1681 	if (level > *max_level)
1682 		*max_level = level;
1683 
1684 	return 0;
1685 }
1686 
1687 unsigned int metricgroups__topdown_max_level(void)
1688 {
1689 	unsigned int max_level = 0;
1690 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1691 
1692 	if (!table)
1693 		return 0;
1694 
1695 	pmu_metrics_table_for_each_metric(table, metricgroup__topdown_max_level_callback,
1696 					  &max_level);
1697 	return max_level;
1698 }
1699 
1700 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1701 				    struct rblist *new_metric_events,
1702 				    struct rblist *old_metric_events)
1703 {
1704 	unsigned int i;
1705 
1706 	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1707 		struct rb_node *nd;
1708 		struct metric_event *old_me, *new_me;
1709 		struct metric_expr *old_expr, *new_expr;
1710 		struct evsel *evsel;
1711 		size_t alloc_size;
1712 		int idx, nr;
1713 
1714 		nd = rblist__entry(old_metric_events, i);
1715 		old_me = container_of(nd, struct metric_event, nd);
1716 
1717 		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1718 		if (!evsel)
1719 			return -EINVAL;
1720 		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1721 		if (!new_me)
1722 			return -ENOMEM;
1723 
1724 		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1725 			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1726 
1727 		list_for_each_entry(old_expr, &old_me->head, nd) {
1728 			new_expr = malloc(sizeof(*new_expr));
1729 			if (!new_expr)
1730 				return -ENOMEM;
1731 
1732 			new_expr->metric_expr = old_expr->metric_expr;
1733 			new_expr->metric_threshold = old_expr->metric_threshold;
1734 			new_expr->metric_name = strdup(old_expr->metric_name);
1735 			if (!new_expr->metric_name)
1736 				return -ENOMEM;
1737 
1738 			new_expr->metric_unit = old_expr->metric_unit;
1739 			new_expr->runtime = old_expr->runtime;
1740 
1741 			if (old_expr->metric_refs) {
1742 				/* calculate number of metric_refs */
1743 				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1744 					continue;
1745 				alloc_size = sizeof(*new_expr->metric_refs);
1746 				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1747 				if (!new_expr->metric_refs) {
1748 					free(new_expr);
1749 					return -ENOMEM;
1750 				}
1751 
1752 				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1753 				       nr * alloc_size);
1754 			} else {
1755 				new_expr->metric_refs = NULL;
1756 			}
1757 
1758 			/* calculate number of metric_events */
1759 			for (nr = 0; old_expr->metric_events[nr]; nr++)
1760 				continue;
1761 			alloc_size = sizeof(*new_expr->metric_events);
1762 			new_expr->metric_events = calloc(nr + 1, alloc_size);
1763 			if (!new_expr->metric_events) {
1764 				zfree(&new_expr->metric_refs);
1765 				free(new_expr);
1766 				return -ENOMEM;
1767 			}
1768 
1769 			/* copy evsel in the same position */
1770 			for (idx = 0; idx < nr; idx++) {
1771 				evsel = old_expr->metric_events[idx];
1772 				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1773 				if (evsel == NULL) {
1774 					zfree(&new_expr->metric_events);
1775 					zfree(&new_expr->metric_refs);
1776 					free(new_expr);
1777 					return -EINVAL;
1778 				}
1779 				new_expr->metric_events[idx] = evsel;
1780 			}
1781 
1782 			list_add(&new_expr->nd, &new_me->head);
1783 		}
1784 	}
1785 	return 0;
1786 }
1787