1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2017, Intel Corporation. 4 */ 5 6 /* Manage metrics and groups of metrics from JSON files */ 7 8 #include "metricgroup.h" 9 #include "debug.h" 10 #include "evlist.h" 11 #include "evsel.h" 12 #include "strbuf.h" 13 #include "pmu.h" 14 #include "pmus.h" 15 #include "print-events.h" 16 #include "smt.h" 17 #include "expr.h" 18 #include "rblist.h" 19 #include <string.h> 20 #include <errno.h> 21 #include "strlist.h" 22 #include <assert.h> 23 #include <linux/ctype.h> 24 #include <linux/list_sort.h> 25 #include <linux/string.h> 26 #include <linux/zalloc.h> 27 #include <perf/cpumap.h> 28 #include <subcmd/parse-options.h> 29 #include <api/fs/fs.h> 30 #include "util.h" 31 #include <asm/bug.h> 32 #include "cgroup.h" 33 #include "util/hashmap.h" 34 35 struct metric_event *metricgroup__lookup(struct rblist *metric_events, 36 struct evsel *evsel, 37 bool create) 38 { 39 struct rb_node *nd; 40 struct metric_event me = { 41 .evsel = evsel 42 }; 43 44 if (!metric_events) 45 return NULL; 46 47 nd = rblist__find(metric_events, &me); 48 if (nd) 49 return container_of(nd, struct metric_event, nd); 50 if (create) { 51 rblist__add_node(metric_events, &me); 52 nd = rblist__find(metric_events, &me); 53 if (nd) 54 return container_of(nd, struct metric_event, nd); 55 } 56 return NULL; 57 } 58 59 static int metric_event_cmp(struct rb_node *rb_node, const void *entry) 60 { 61 struct metric_event *a = container_of(rb_node, 62 struct metric_event, 63 nd); 64 const struct metric_event *b = entry; 65 66 if (a->evsel == b->evsel) 67 return 0; 68 if ((char *)a->evsel < (char *)b->evsel) 69 return -1; 70 return +1; 71 } 72 73 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused, 74 const void *entry) 75 { 76 struct metric_event *me = malloc(sizeof(struct metric_event)); 77 78 if (!me) 79 return NULL; 80 memcpy(me, entry, sizeof(struct metric_event)); 81 me->evsel = ((struct metric_event *)entry)->evsel; 82 
INIT_LIST_HEAD(&me->head); 83 return &me->nd; 84 } 85 86 static void metric_event_delete(struct rblist *rblist __maybe_unused, 87 struct rb_node *rb_node) 88 { 89 struct metric_event *me = container_of(rb_node, struct metric_event, nd); 90 struct metric_expr *expr, *tmp; 91 92 list_for_each_entry_safe(expr, tmp, &me->head, nd) { 93 zfree(&expr->metric_name); 94 zfree(&expr->metric_refs); 95 zfree(&expr->metric_events); 96 free(expr); 97 } 98 99 free(me); 100 } 101 102 static void metricgroup__rblist_init(struct rblist *metric_events) 103 { 104 rblist__init(metric_events); 105 metric_events->node_cmp = metric_event_cmp; 106 metric_events->node_new = metric_event_new; 107 metric_events->node_delete = metric_event_delete; 108 } 109 110 void metricgroup__rblist_exit(struct rblist *metric_events) 111 { 112 rblist__exit(metric_events); 113 } 114 115 /** 116 * The metric under construction. The data held here will be placed in a 117 * metric_expr. 118 */ 119 struct metric { 120 struct list_head nd; 121 /** 122 * The expression parse context importantly holding the IDs contained 123 * within the expression. 124 */ 125 struct expr_parse_ctx *pctx; 126 const char *pmu; 127 /** The name of the metric such as "IPC". */ 128 const char *metric_name; 129 /** Modifier on the metric such as "u" or NULL for none. */ 130 const char *modifier; 131 /** The expression to parse, for example, "instructions/cycles". */ 132 const char *metric_expr; 133 /** Optional threshold expression where zero value is green, otherwise red. */ 134 const char *metric_threshold; 135 /** 136 * The "ScaleUnit" that scales and adds a unit to the metric during 137 * output. 138 */ 139 const char *metric_unit; 140 /** Optional null terminated array of referenced metrics. */ 141 struct metric_ref *metric_refs; 142 /** 143 * Should events of the metric be grouped? 144 */ 145 bool group_events; 146 /** 147 * Parsed events for the metric. 
Optional as events may be taken from a 148 * different metric whose group contains all the IDs necessary for this 149 * one. 150 */ 151 struct evlist *evlist; 152 }; 153 154 static void metric__watchdog_constraint_hint(const char *name, bool foot) 155 { 156 static bool violate_nmi_constraint; 157 158 if (!foot) { 159 pr_warning("Not grouping metric %s's events.\n", name); 160 violate_nmi_constraint = true; 161 return; 162 } 163 164 if (!violate_nmi_constraint) 165 return; 166 167 pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n" 168 " echo 0 > /proc/sys/kernel/nmi_watchdog\n" 169 " perf stat ...\n" 170 " echo 1 > /proc/sys/kernel/nmi_watchdog\n"); 171 } 172 173 static bool metric__group_events(const struct pmu_metric *pm) 174 { 175 switch (pm->event_grouping) { 176 case MetricNoGroupEvents: 177 return false; 178 case MetricNoGroupEventsNmi: 179 if (!sysctl__nmi_watchdog_enabled()) 180 return true; 181 metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false); 182 return false; 183 case MetricNoGroupEventsSmt: 184 return !smt_on(); 185 case MetricGroupEvents: 186 default: 187 return true; 188 } 189 } 190 191 static void metric__free(struct metric *m) 192 { 193 if (!m) 194 return; 195 196 zfree(&m->metric_refs); 197 expr__ctx_free(m->pctx); 198 zfree(&m->modifier); 199 evlist__delete(m->evlist); 200 free(m); 201 } 202 203 static struct metric *metric__new(const struct pmu_metric *pm, 204 const char *modifier, 205 bool metric_no_group, 206 int runtime, 207 const char *user_requested_cpu_list, 208 bool system_wide) 209 { 210 struct metric *m; 211 212 m = zalloc(sizeof(*m)); 213 if (!m) 214 return NULL; 215 216 m->pctx = expr__ctx_new(); 217 if (!m->pctx) 218 goto out_err; 219 220 m->pmu = pm->pmu ?: "cpu"; 221 m->metric_name = pm->metric_name; 222 m->modifier = NULL; 223 if (modifier) { 224 m->modifier = strdup(modifier); 225 if (!m->modifier) 226 goto out_err; 227 } 228 m->metric_expr = pm->metric_expr; 229 
m->metric_threshold = pm->metric_threshold; 230 m->metric_unit = pm->unit; 231 m->pctx->sctx.user_requested_cpu_list = NULL; 232 if (user_requested_cpu_list) { 233 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list); 234 if (!m->pctx->sctx.user_requested_cpu_list) 235 goto out_err; 236 } 237 m->pctx->sctx.runtime = runtime; 238 m->pctx->sctx.system_wide = system_wide; 239 m->group_events = !metric_no_group && metric__group_events(pm); 240 m->metric_refs = NULL; 241 m->evlist = NULL; 242 243 return m; 244 out_err: 245 metric__free(m); 246 return NULL; 247 } 248 249 static bool contains_metric_id(struct evsel **metric_events, int num_events, 250 const char *metric_id) 251 { 252 int i; 253 254 for (i = 0; i < num_events; i++) { 255 if (!strcmp(evsel__metric_id(metric_events[i]), metric_id)) 256 return true; 257 } 258 return false; 259 } 260 261 /** 262 * setup_metric_events - Find a group of events in metric_evlist that correspond 263 * to the IDs from a parsed metric expression. 264 * @pmu: The PMU for the IDs. 265 * @ids: the metric IDs to match. 266 * @metric_evlist: the list of perf events. 267 * @out_metric_events: holds the created metric events array. 268 */ 269 static int setup_metric_events(const char *pmu, struct hashmap *ids, 270 struct evlist *metric_evlist, 271 struct evsel ***out_metric_events) 272 { 273 struct evsel **metric_events; 274 const char *metric_id; 275 struct evsel *ev; 276 size_t ids_size, matched_events, i; 277 bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu); 278 279 *out_metric_events = NULL; 280 ids_size = hashmap__size(ids); 281 282 metric_events = calloc(sizeof(void *), ids_size + 1); 283 if (!metric_events) 284 return -ENOMEM; 285 286 matched_events = 0; 287 evlist__for_each_entry(metric_evlist, ev) { 288 struct expr_id_data *val_ptr; 289 290 /* Don't match events for the wrong hybrid PMU. 
*/ 291 if (!all_pmus && ev->pmu_name && evsel__is_hybrid(ev) && 292 strcmp(ev->pmu_name, pmu)) 293 continue; 294 /* 295 * Check for duplicate events with the same name. For 296 * example, uncore_imc/cas_count_read/ will turn into 6 297 * events per socket on skylakex. Only the first such 298 * event is placed in metric_events. 299 */ 300 metric_id = evsel__metric_id(ev); 301 if (contains_metric_id(metric_events, matched_events, metric_id)) 302 continue; 303 /* 304 * Does this event belong to the parse context? For 305 * combined or shared groups, this metric may not care 306 * about this event. 307 */ 308 if (hashmap__find(ids, metric_id, &val_ptr)) { 309 pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev)); 310 metric_events[matched_events++] = ev; 311 312 if (matched_events >= ids_size) 313 break; 314 } 315 } 316 if (matched_events < ids_size) { 317 free(metric_events); 318 return -EINVAL; 319 } 320 for (i = 0; i < ids_size; i++) { 321 ev = metric_events[i]; 322 ev->collect_stat = true; 323 324 /* 325 * The metric leader points to the identically named 326 * event in metric_events. 327 */ 328 ev->metric_leader = ev; 329 /* 330 * Mark two events with identical names in the same 331 * group (or globally) as being in use as uncore events 332 * may be duplicated for each pmu. Set the metric leader 333 * of such events to be the event that appears in 334 * metric_events. 
335 */ 336 metric_id = evsel__metric_id(ev); 337 evlist__for_each_entry_continue(metric_evlist, ev) { 338 if (!strcmp(evsel__metric_id(ev), metric_id)) 339 ev->metric_leader = metric_events[i]; 340 } 341 } 342 *out_metric_events = metric_events; 343 return 0; 344 } 345 346 static bool match_metric(const char *n, const char *list) 347 { 348 int len; 349 char *m; 350 351 if (!list) 352 return false; 353 if (!strcmp(list, "all")) 354 return true; 355 if (!n) 356 return !strcasecmp(list, "No_group"); 357 len = strlen(list); 358 m = strcasestr(n, list); 359 if (!m) 360 return false; 361 if ((m == n || m[-1] == ';' || m[-1] == ' ') && 362 (m[len] == 0 || m[len] == ';')) 363 return true; 364 return false; 365 } 366 367 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric) 368 { 369 const char *pm_pmu = pm->pmu ?: "cpu"; 370 371 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu)) 372 return false; 373 374 return match_metric(pm->metric_group, metric) || 375 match_metric(pm->metric_name, metric); 376 } 377 378 /** struct mep - RB-tree node for building printing information. */ 379 struct mep { 380 /** nd - RB-tree element. */ 381 struct rb_node nd; 382 /** @metric_group: Owned metric group name, separated others with ';'. 
*/ 383 char *metric_group; 384 const char *metric_name; 385 const char *metric_desc; 386 const char *metric_long_desc; 387 const char *metric_expr; 388 const char *metric_threshold; 389 const char *metric_unit; 390 }; 391 392 static int mep_cmp(struct rb_node *rb_node, const void *entry) 393 { 394 struct mep *a = container_of(rb_node, struct mep, nd); 395 struct mep *b = (struct mep *)entry; 396 int ret; 397 398 ret = strcmp(a->metric_group, b->metric_group); 399 if (ret) 400 return ret; 401 402 return strcmp(a->metric_name, b->metric_name); 403 } 404 405 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry) 406 { 407 struct mep *me = malloc(sizeof(struct mep)); 408 409 if (!me) 410 return NULL; 411 412 memcpy(me, entry, sizeof(struct mep)); 413 return &me->nd; 414 } 415 416 static void mep_delete(struct rblist *rl __maybe_unused, 417 struct rb_node *nd) 418 { 419 struct mep *me = container_of(nd, struct mep, nd); 420 421 zfree(&me->metric_group); 422 free(me); 423 } 424 425 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group, 426 const char *metric_name) 427 { 428 struct rb_node *nd; 429 struct mep me = { 430 .metric_group = strdup(metric_group), 431 .metric_name = metric_name, 432 }; 433 nd = rblist__find(groups, &me); 434 if (nd) { 435 free(me.metric_group); 436 return container_of(nd, struct mep, nd); 437 } 438 rblist__add_node(groups, &me); 439 nd = rblist__find(groups, &me); 440 if (nd) 441 return container_of(nd, struct mep, nd); 442 return NULL; 443 } 444 445 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm, 446 struct rblist *groups) 447 { 448 const char *g; 449 char *omg, *mg; 450 451 mg = strdup(pm->metric_group ?: "No_group"); 452 if (!mg) 453 return -ENOMEM; 454 omg = mg; 455 while ((g = strsep(&mg, ";")) != NULL) { 456 struct mep *me; 457 458 g = skip_spaces(g); 459 if (strlen(g)) 460 me = mep_lookup(groups, g, pm->metric_name); 461 else 462 me = mep_lookup(groups, 
"No_group", pm->metric_name); 463 464 if (me) { 465 me->metric_desc = pm->desc; 466 me->metric_long_desc = pm->long_desc; 467 me->metric_expr = pm->metric_expr; 468 me->metric_threshold = pm->metric_threshold; 469 me->metric_unit = pm->unit; 470 } 471 } 472 free(omg); 473 474 return 0; 475 } 476 477 struct metricgroup_iter_data { 478 pmu_metric_iter_fn fn; 479 void *data; 480 }; 481 482 static int metricgroup__sys_event_iter(const struct pmu_metric *pm, 483 const struct pmu_metrics_table *table, 484 void *data) 485 { 486 struct metricgroup_iter_data *d = data; 487 struct perf_pmu *pmu = NULL; 488 489 if (!pm->metric_expr || !pm->compat) 490 return 0; 491 492 while ((pmu = perf_pmus__scan(pmu))) { 493 494 if (!pmu->id || strcmp(pmu->id, pm->compat)) 495 continue; 496 497 return d->fn(pm, table, d->data); 498 } 499 return 0; 500 } 501 502 static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm, 503 const struct pmu_metrics_table *table __maybe_unused, 504 void *vdata) 505 { 506 struct rblist *groups = vdata; 507 508 return metricgroup__add_to_mep_groups(pm, groups); 509 } 510 511 void metricgroup__print(const struct print_callbacks *print_cb, void *print_state) 512 { 513 struct rblist groups; 514 const struct pmu_metrics_table *table; 515 struct rb_node *node, *next; 516 517 rblist__init(&groups); 518 groups.node_new = mep_new; 519 groups.node_cmp = mep_cmp; 520 groups.node_delete = mep_delete; 521 table = pmu_metrics_table__find(); 522 if (table) { 523 pmu_metrics_table_for_each_metric(table, 524 metricgroup__add_to_mep_groups_callback, 525 &groups); 526 } 527 { 528 struct metricgroup_iter_data data = { 529 .fn = metricgroup__add_to_mep_groups_callback, 530 .data = &groups, 531 }; 532 pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data); 533 } 534 535 for (node = rb_first_cached(&groups.entries); node; node = next) { 536 struct mep *me = container_of(node, struct mep, nd); 537 538 print_cb->print_metric(print_state, 539 
me->metric_group, 540 me->metric_name, 541 me->metric_desc, 542 me->metric_long_desc, 543 me->metric_expr, 544 me->metric_threshold, 545 me->metric_unit); 546 next = rb_next(node); 547 rblist__remove_node(&groups, node); 548 } 549 } 550 551 static const char *code_characters = ",-=@"; 552 553 static int encode_metric_id(struct strbuf *sb, const char *x) 554 { 555 char *c; 556 int ret = 0; 557 558 for (; *x; x++) { 559 c = strchr(code_characters, *x); 560 if (c) { 561 ret = strbuf_addch(sb, '!'); 562 if (ret) 563 break; 564 565 ret = strbuf_addch(sb, '0' + (c - code_characters)); 566 if (ret) 567 break; 568 } else { 569 ret = strbuf_addch(sb, *x); 570 if (ret) 571 break; 572 } 573 } 574 return ret; 575 } 576 577 static int decode_metric_id(struct strbuf *sb, const char *x) 578 { 579 const char *orig = x; 580 size_t i; 581 char c; 582 int ret; 583 584 for (; *x; x++) { 585 c = *x; 586 if (*x == '!') { 587 x++; 588 i = *x - '0'; 589 if (i > strlen(code_characters)) { 590 pr_err("Bad metric-id encoding in: '%s'", orig); 591 return -1; 592 } 593 c = code_characters[i]; 594 } 595 ret = strbuf_addch(sb, c); 596 if (ret) 597 return ret; 598 } 599 return 0; 600 } 601 602 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier) 603 { 604 struct evsel *ev; 605 struct strbuf sb = STRBUF_INIT; 606 char *cur; 607 int ret = 0; 608 609 evlist__for_each_entry(perf_evlist, ev) { 610 if (!ev->metric_id) 611 continue; 612 613 ret = strbuf_setlen(&sb, 0); 614 if (ret) 615 break; 616 617 ret = decode_metric_id(&sb, ev->metric_id); 618 if (ret) 619 break; 620 621 free((char *)ev->metric_id); 622 ev->metric_id = strdup(sb.buf); 623 if (!ev->metric_id) { 624 ret = -ENOMEM; 625 break; 626 } 627 /* 628 * If the name is just the parsed event, use the metric-id to 629 * give a more friendly display version. 
630 */ 631 if (strstr(ev->name, "metric-id=")) { 632 bool has_slash = false; 633 634 zfree(&ev->name); 635 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) { 636 *cur = '/'; 637 has_slash = true; 638 } 639 640 if (modifier) { 641 if (!has_slash && !strchr(sb.buf, ':')) { 642 ret = strbuf_addch(&sb, ':'); 643 if (ret) 644 break; 645 } 646 ret = strbuf_addstr(&sb, modifier); 647 if (ret) 648 break; 649 } 650 ev->name = strdup(sb.buf); 651 if (!ev->name) { 652 ret = -ENOMEM; 653 break; 654 } 655 } 656 } 657 strbuf_release(&sb); 658 return ret; 659 } 660 661 static int metricgroup__build_event_string(struct strbuf *events, 662 const struct expr_parse_ctx *ctx, 663 const char *modifier, 664 bool group_events) 665 { 666 struct hashmap_entry *cur; 667 size_t bkt; 668 bool no_group = true, has_tool_events = false; 669 bool tool_events[PERF_TOOL_MAX] = {false}; 670 int ret = 0; 671 672 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0) 673 674 hashmap__for_each_entry(ctx->ids, cur, bkt) { 675 const char *sep, *rsep, *id = cur->pkey; 676 enum perf_tool_event ev; 677 678 pr_debug("found event %s\n", id); 679 680 /* Always move tool events outside of the group. */ 681 ev = perf_tool_event__from_str(id); 682 if (ev != PERF_TOOL_NONE) { 683 has_tool_events = true; 684 tool_events[ev] = true; 685 continue; 686 } 687 /* Separate events with commas and open the group if necessary. */ 688 if (no_group) { 689 if (group_events) { 690 ret = strbuf_addch(events, '{'); 691 RETURN_IF_NON_ZERO(ret); 692 } 693 694 no_group = false; 695 } else { 696 ret = strbuf_addch(events, ','); 697 RETURN_IF_NON_ZERO(ret); 698 } 699 /* 700 * Encode the ID as an event string. Add a qualifier for 701 * metric_id that is the original name except with characters 702 * that parse-events can't parse replaced. 
For example, 703 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/ 704 */ 705 sep = strchr(id, '@'); 706 if (sep != NULL) { 707 ret = strbuf_add(events, id, sep - id); 708 RETURN_IF_NON_ZERO(ret); 709 ret = strbuf_addch(events, '/'); 710 RETURN_IF_NON_ZERO(ret); 711 rsep = strrchr(sep, '@'); 712 ret = strbuf_add(events, sep + 1, rsep - sep - 1); 713 RETURN_IF_NON_ZERO(ret); 714 ret = strbuf_addstr(events, ",metric-id="); 715 RETURN_IF_NON_ZERO(ret); 716 sep = rsep; 717 } else { 718 sep = strchr(id, ':'); 719 if (sep != NULL) { 720 ret = strbuf_add(events, id, sep - id); 721 RETURN_IF_NON_ZERO(ret); 722 } else { 723 ret = strbuf_addstr(events, id); 724 RETURN_IF_NON_ZERO(ret); 725 } 726 ret = strbuf_addstr(events, "/metric-id="); 727 RETURN_IF_NON_ZERO(ret); 728 } 729 ret = encode_metric_id(events, id); 730 RETURN_IF_NON_ZERO(ret); 731 ret = strbuf_addstr(events, "/"); 732 RETURN_IF_NON_ZERO(ret); 733 734 if (sep != NULL) { 735 ret = strbuf_addstr(events, sep + 1); 736 RETURN_IF_NON_ZERO(ret); 737 } 738 if (modifier) { 739 ret = strbuf_addstr(events, modifier); 740 RETURN_IF_NON_ZERO(ret); 741 } 742 } 743 if (!no_group && group_events) { 744 ret = strbuf_addf(events, "}:W"); 745 RETURN_IF_NON_ZERO(ret); 746 } 747 if (has_tool_events) { 748 int i; 749 750 perf_tool_event__for_each_event(i) { 751 if (tool_events[i]) { 752 if (!no_group) { 753 ret = strbuf_addch(events, ','); 754 RETURN_IF_NON_ZERO(ret); 755 } 756 no_group = false; 757 ret = strbuf_addstr(events, perf_tool_event__to_str(i)); 758 RETURN_IF_NON_ZERO(ret); 759 } 760 } 761 } 762 763 return ret; 764 #undef RETURN_IF_NON_ZERO 765 } 766 767 int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused) 768 { 769 return 1; 770 } 771 772 /* 773 * A singly linked list on the stack of the names of metrics being 774 * processed. Used to identify recursion. 
 */
struct visited_metric {
	const char *name;
	const struct visited_metric *parent;
};

/* Bundled arguments for metricgroup__add_metric_sys_event_iter. */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	/* Out: the iteration's result code, written on every callback. */
	int *ret;
	/* Out: set to true when a metric was successfully added. */
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};

static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm);

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);

/**
 * resolve_metric - Locate metrics within the root metric and recursively add
 *                  references to them.
 * @metric_list: The list the metric is added to.
 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
829 * @table: The table that is searched for metrics, most commonly the table for the 830 * architecture perf is running upon. 831 */ 832 static int resolve_metric(struct list_head *metric_list, 833 const char *pmu, 834 const char *modifier, 835 bool metric_no_group, 836 bool metric_no_threshold, 837 const char *user_requested_cpu_list, 838 bool system_wide, 839 struct metric *root_metric, 840 const struct visited_metric *visited, 841 const struct pmu_metrics_table *table) 842 { 843 struct hashmap_entry *cur; 844 size_t bkt; 845 struct to_resolve { 846 /* The metric to resolve. */ 847 struct pmu_metric pm; 848 /* 849 * The key in the IDs map, this may differ from in case, 850 * etc. from pm->metric_name. 851 */ 852 const char *key; 853 } *pending = NULL; 854 int i, ret = 0, pending_cnt = 0; 855 856 /* 857 * Iterate all the parsed IDs and if there's a matching metric and it to 858 * the pending array. 859 */ 860 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) { 861 struct pmu_metric pm; 862 863 if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) { 864 pending = realloc(pending, 865 (pending_cnt + 1) * sizeof(struct to_resolve)); 866 if (!pending) 867 return -ENOMEM; 868 869 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm)); 870 pending[pending_cnt].key = cur->pkey; 871 pending_cnt++; 872 } 873 } 874 875 /* Remove the metric IDs from the context. */ 876 for (i = 0; i < pending_cnt; i++) 877 expr__del_id(root_metric->pctx, pending[i].key); 878 879 /* 880 * Recursively add all the metrics, IDs are added to the root metric's 881 * context. 882 */ 883 for (i = 0; i < pending_cnt; i++) { 884 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group, 885 metric_no_threshold, user_requested_cpu_list, system_wide, 886 root_metric, visited, table); 887 if (ret) 888 break; 889 } 890 891 free(pending); 892 return ret; 893 } 894 895 /** 896 * __add_metric - Add a metric to metric_list. 897 * @metric_list: The list the metric is added to. 
898 * @pm: The pmu_metric containing the metric to be added. 899 * @modifier: if non-null event modifiers like "u". 900 * @metric_no_group: Should events written to events be grouped "{}" or 901 * global. Grouping is the default but due to multiplexing the 902 * user may override. 903 * @metric_no_threshold: Should threshold expressions be ignored? 904 * @runtime: A special argument for the parser only known at runtime. 905 * @user_requested_cpu_list: Command line specified CPUs to record on. 906 * @system_wide: Are events for all processes recorded. 907 * @root_metric: Metrics may reference other metrics to form a tree. In this 908 * case the root_metric holds all the IDs and a list of referenced 909 * metrics. When adding a root this argument is NULL. 910 * @visited: A singly linked list of metric names being added that is used to 911 * detect recursion. 912 * @table: The table that is searched for metrics, most commonly the table for the 913 * architecture perf is running upon. 914 */ 915 static int __add_metric(struct list_head *metric_list, 916 const struct pmu_metric *pm, 917 const char *modifier, 918 bool metric_no_group, 919 bool metric_no_threshold, 920 int runtime, 921 const char *user_requested_cpu_list, 922 bool system_wide, 923 struct metric *root_metric, 924 const struct visited_metric *visited, 925 const struct pmu_metrics_table *table) 926 { 927 const struct visited_metric *vm; 928 int ret; 929 bool is_root = !root_metric; 930 const char *expr; 931 struct visited_metric visited_node = { 932 .name = pm->metric_name, 933 .parent = visited, 934 }; 935 936 for (vm = visited; vm; vm = vm->parent) { 937 if (!strcmp(pm->metric_name, vm->name)) { 938 pr_err("failed: recursion detected for %s\n", pm->metric_name); 939 return -1; 940 } 941 } 942 943 if (is_root) { 944 /* 945 * This metric is the root of a tree and may reference other 946 * metrics that are added recursively. 
947 */ 948 root_metric = metric__new(pm, modifier, metric_no_group, runtime, 949 user_requested_cpu_list, system_wide); 950 if (!root_metric) 951 return -ENOMEM; 952 953 } else { 954 int cnt = 0; 955 956 /* 957 * This metric was referenced in a metric higher in the 958 * tree. Check if the same metric is already resolved in the 959 * metric_refs list. 960 */ 961 if (root_metric->metric_refs) { 962 for (; root_metric->metric_refs[cnt].metric_name; cnt++) { 963 if (!strcmp(pm->metric_name, 964 root_metric->metric_refs[cnt].metric_name)) 965 return 0; 966 } 967 } 968 969 /* Create reference. Need space for the entry and the terminator. */ 970 root_metric->metric_refs = realloc(root_metric->metric_refs, 971 (cnt + 2) * sizeof(struct metric_ref)); 972 if (!root_metric->metric_refs) 973 return -ENOMEM; 974 975 /* 976 * Intentionally passing just const char pointers, 977 * from 'pe' object, so they never go away. We don't 978 * need to change them, so there's no need to create 979 * our own copy. 980 */ 981 root_metric->metric_refs[cnt].metric_name = pm->metric_name; 982 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr; 983 984 /* Null terminate array. */ 985 root_metric->metric_refs[cnt+1].metric_name = NULL; 986 root_metric->metric_refs[cnt+1].metric_expr = NULL; 987 } 988 989 /* 990 * For both the parent and referenced metrics, we parse 991 * all the metric's IDs and add it to the root context. 992 */ 993 ret = 0; 994 expr = pm->metric_expr; 995 if (is_root && pm->metric_threshold) { 996 /* 997 * Threshold expressions are built off the actual metric. Switch 998 * to use that in case of additional necessary events. Change 999 * the visited node name to avoid this being flagged as 1000 * recursion. If the threshold events are disabled, just use the 1001 * metric's name as a reference. This allows metric threshold 1002 * computation if there are sufficient events. 
1003 */ 1004 assert(strstr(pm->metric_threshold, pm->metric_name)); 1005 expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold; 1006 visited_node.name = "__threshold__"; 1007 } 1008 if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) { 1009 /* Broken metric. */ 1010 ret = -EINVAL; 1011 } 1012 if (!ret) { 1013 /* Resolve referenced metrics. */ 1014 const char *pmu = pm->pmu ?: "cpu"; 1015 1016 ret = resolve_metric(metric_list, pmu, modifier, metric_no_group, 1017 metric_no_threshold, user_requested_cpu_list, 1018 system_wide, root_metric, &visited_node, 1019 table); 1020 } 1021 if (ret) { 1022 if (is_root) 1023 metric__free(root_metric); 1024 1025 } else if (is_root) 1026 list_add(&root_metric->nd, metric_list); 1027 1028 return ret; 1029 } 1030 1031 struct metricgroup__find_metric_data { 1032 const char *pmu; 1033 const char *metric; 1034 struct pmu_metric *pm; 1035 }; 1036 1037 static int metricgroup__find_metric_callback(const struct pmu_metric *pm, 1038 const struct pmu_metrics_table *table __maybe_unused, 1039 void *vdata) 1040 { 1041 struct metricgroup__find_metric_data *data = vdata; 1042 const char *pm_pmu = pm->pmu ?: "cpu"; 1043 1044 if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu)) 1045 return 0; 1046 1047 if (!match_metric(pm->metric_name, data->metric)) 1048 return 0; 1049 1050 memcpy(data->pm, pm, sizeof(*pm)); 1051 return 1; 1052 } 1053 1054 static bool metricgroup__find_metric(const char *pmu, 1055 const char *metric, 1056 const struct pmu_metrics_table *table, 1057 struct pmu_metric *pm) 1058 { 1059 struct metricgroup__find_metric_data data = { 1060 .pmu = pmu, 1061 .metric = metric, 1062 .pm = pm, 1063 }; 1064 1065 return pmu_metrics_table_for_each_metric(table, metricgroup__find_metric_callback, &data) 1066 ? 
true : false; 1067 } 1068 1069 static int add_metric(struct list_head *metric_list, 1070 const struct pmu_metric *pm, 1071 const char *modifier, 1072 bool metric_no_group, 1073 bool metric_no_threshold, 1074 const char *user_requested_cpu_list, 1075 bool system_wide, 1076 struct metric *root_metric, 1077 const struct visited_metric *visited, 1078 const struct pmu_metrics_table *table) 1079 { 1080 int ret = 0; 1081 1082 pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name); 1083 1084 if (!strstr(pm->metric_expr, "?")) { 1085 ret = __add_metric(metric_list, pm, modifier, metric_no_group, 1086 metric_no_threshold, 0, user_requested_cpu_list, 1087 system_wide, root_metric, visited, table); 1088 } else { 1089 int j, count; 1090 1091 count = arch_get_runtimeparam(pm); 1092 1093 /* This loop is added to create multiple 1094 * events depend on count value and add 1095 * those events to metric_list. 1096 */ 1097 1098 for (j = 0; j < count && !ret; j++) 1099 ret = __add_metric(metric_list, pm, modifier, metric_no_group, 1100 metric_no_threshold, j, user_requested_cpu_list, 1101 system_wide, root_metric, visited, table); 1102 } 1103 1104 return ret; 1105 } 1106 1107 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm, 1108 const struct pmu_metrics_table *table __maybe_unused, 1109 void *data) 1110 { 1111 struct metricgroup_add_iter_data *d = data; 1112 int ret; 1113 1114 if (!match_pm_metric(pm, d->pmu, d->metric_name)) 1115 return 0; 1116 1117 ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group, 1118 d->metric_no_threshold, d->user_requested_cpu_list, 1119 d->system_wide, d->root_metric, d->visited, d->table); 1120 if (ret) 1121 goto out; 1122 1123 *(d->has_match) = true; 1124 1125 out: 1126 *(d->ret) = ret; 1127 return ret; 1128 } 1129 1130 /** 1131 * metric_list_cmp - list_sort comparator that sorts metrics with more events to 1132 * the front. tool events are excluded from the count. 
1133 */ 1134 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l, 1135 const struct list_head *r) 1136 { 1137 const struct metric *left = container_of(l, struct metric, nd); 1138 const struct metric *right = container_of(r, struct metric, nd); 1139 struct expr_id_data *data; 1140 int i, left_count, right_count; 1141 1142 left_count = hashmap__size(left->pctx->ids); 1143 perf_tool_event__for_each_event(i) { 1144 if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data)) 1145 left_count--; 1146 } 1147 1148 right_count = hashmap__size(right->pctx->ids); 1149 perf_tool_event__for_each_event(i) { 1150 if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data)) 1151 right_count--; 1152 } 1153 1154 return right_count - left_count; 1155 } 1156 1157 struct metricgroup__add_metric_data { 1158 struct list_head *list; 1159 const char *pmu; 1160 const char *metric_name; 1161 const char *modifier; 1162 const char *user_requested_cpu_list; 1163 bool metric_no_group; 1164 bool metric_no_threshold; 1165 bool system_wide; 1166 bool has_match; 1167 }; 1168 1169 static int metricgroup__add_metric_callback(const struct pmu_metric *pm, 1170 const struct pmu_metrics_table *table, 1171 void *vdata) 1172 { 1173 struct metricgroup__add_metric_data *data = vdata; 1174 int ret = 0; 1175 1176 if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) { 1177 bool metric_no_group = data->metric_no_group || 1178 match_metric(data->metric_name, pm->metricgroup_no_group); 1179 1180 data->has_match = true; 1181 ret = add_metric(data->list, pm, data->modifier, metric_no_group, 1182 data->metric_no_threshold, data->user_requested_cpu_list, 1183 data->system_wide, /*root_metric=*/NULL, 1184 /*visited_metrics=*/NULL, table); 1185 } 1186 return ret; 1187 } 1188 1189 /** 1190 * metricgroup__add_metric - Find and add a metric, or a metric group. 1191 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs. 
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: presumably drops threshold expressions from the added
 *                       metrics — TODO(review): confirm against __add_metric.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	bool has_match = false;

	{
		struct metricgroup__add_metric_data data = {
			.list = &list,
			.pmu = pmu,
			.metric_name = metric_name,
			.modifier = modifier,
			.metric_no_group = metric_no_group,
			.metric_no_threshold = metric_no_threshold,
			.user_requested_cpu_list = user_requested_cpu_list,
			.system_wide = system_wide,
			.has_match = false,
		};
		/*
		 * Iterate over all metrics seeing if metric matches either the
		 * name or group. When it does add the metric to the list.
		 */
		ret = pmu_metrics_table_for_each_metric(table, metricgroup__add_metric_callback,
							&data);
		if (ret)
			goto out;

		has_match = data.has_match;
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			/*
			 * NOTE(review): .metric_no_threshold is not set here
			 * (defaults to false for sys metrics) — verify this is
			 * intentional rather than an oversight.
			 */
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.pmu = pmu,
				.metric_name = metric_name,
				.modifier = modifier,
				.metric_no_group = metric_no_group,
				.user_requested_cpu_list = user_requested_cpu_list,
				.system_wide = system_wide,
				.has_match = &has_match,
				.ret = &ret,
				.table = table,
			},
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match)
		ret = -EINVAL;

out:
	/*
	 * add to metric_list so that they can be released
	 * even if it's failed
	 */
	list_splice(&list, metric_list);
	return ret;
}

/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	/*
	 * ret is always assigned before the post-loop test: strsep() on a
	 * non-NULL string returns non-NULL at least once, so the loop body
	 * runs at least one iteration.
	 */
	int ret, count = 0;

	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		/* An optional ":modifier" suffix follows the metric name. */
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}

/* Detach and free every metric on metric_list. */
static void metricgroup__free_metrics(struct list_head *metric_list)
{
	struct metric *m, *tmp;

	list_for_each_entry_safe (m, tmp, metric_list, nd) {
		list_del_init(&m->nd);
		metric__free(m);
	}
}

/**
 * find_tool_events - Search for the presence of tool events in metric_list.
 * @metric_list: List to take metrics from.
 * @tool_events: Array of false values, indices corresponding to tool events set
 *		 to true if tool event is found.
 */
static void find_tool_events(const struct list_head *metric_list,
			     bool tool_events[PERF_TOOL_MAX])
{
	struct metric *m;

	list_for_each_entry(m, metric_list, nd) {
		int i;

		perf_tool_event__for_each_event(i) {
			struct expr_id_data *data;

			/* Mark the tool event if any metric references it. */
			if (!tool_events[i] &&
			    !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
				tool_events[i] = true;
		}
	}
}

/**
 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
 *                           metric IDs, as the IDs are held in a set,
 *                           duplicates will be removed.
 * @metric_list: List to take metrics from.
 * @combined: Out argument for result.
 */
static int build_combined_expr_ctx(const struct list_head *metric_list,
				   struct expr_parse_ctx **combined)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct metric *m;
	char *dup;
	int ret;

	*combined = expr__ctx_new();
	if (!*combined)
		return -ENOMEM;

	list_for_each_entry(m, metric_list, nd) {
		/* Only unmodified, ungrouped metrics contribute to the set. */
		if (!m->group_events && !m->modifier) {
			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
				dup = strdup(cur->pkey);
				if (!dup) {
					ret = -ENOMEM;
					goto err_out;
				}
				ret = expr__add_id(*combined, dup);
				if (ret)
					goto err_out;
			}
		}
	}
	return 0;
err_out:
	expr__ctx_free(*combined);
	*combined = NULL;
	return ret;
}

/**
 * parse_ids - Build the event string for the ids and parse them creating an
 *	       evlist. The encoded metric_ids are decoded.
 * @metric_no_merge: is metric sharing explicitly disabled.
 * @fake_pmu: used when testing metrics not supported by the current CPU.
 * @ids: the event identifiers parsed from a metric.
 * @modifier: any modifiers added to the events.
 * @group_events: should events be placed in a weak group.
1417 * @tool_events: entries set true if the tool event of index could be present in 1418 * the overall list of metrics. 1419 * @out_evlist: the created list of events. 1420 */ 1421 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, 1422 struct expr_parse_ctx *ids, const char *modifier, 1423 bool group_events, const bool tool_events[PERF_TOOL_MAX], 1424 struct evlist **out_evlist) 1425 { 1426 struct parse_events_error parse_error; 1427 struct evlist *parsed_evlist; 1428 struct strbuf events = STRBUF_INIT; 1429 int ret; 1430 1431 *out_evlist = NULL; 1432 if (!metric_no_merge || hashmap__size(ids->ids) == 0) { 1433 bool added_event = false; 1434 int i; 1435 /* 1436 * We may fail to share events between metrics because a tool 1437 * event isn't present in one metric. For example, a ratio of 1438 * cache misses doesn't need duration_time but the same events 1439 * may be used for a misses per second. Events without sharing 1440 * implies multiplexing, that is best avoided, so place 1441 * all tool events in every group. 1442 * 1443 * Also, there may be no ids/events in the expression parsing 1444 * context because of constant evaluation, e.g.: 1445 * event1 if #smt_on else 0 1446 * Add a tool event to avoid a parse error on an empty string. 
1447 */ 1448 perf_tool_event__for_each_event(i) { 1449 if (tool_events[i]) { 1450 char *tmp = strdup(perf_tool_event__to_str(i)); 1451 1452 if (!tmp) 1453 return -ENOMEM; 1454 ids__insert(ids->ids, tmp); 1455 added_event = true; 1456 } 1457 } 1458 if (!added_event && hashmap__size(ids->ids) == 0) { 1459 char *tmp = strdup("duration_time"); 1460 1461 if (!tmp) 1462 return -ENOMEM; 1463 ids__insert(ids->ids, tmp); 1464 } 1465 } 1466 ret = metricgroup__build_event_string(&events, ids, modifier, 1467 group_events); 1468 if (ret) 1469 return ret; 1470 1471 parsed_evlist = evlist__new(); 1472 if (!parsed_evlist) { 1473 ret = -ENOMEM; 1474 goto err_out; 1475 } 1476 pr_debug("Parsing metric events '%s'\n", events.buf); 1477 parse_events_error__init(&parse_error); 1478 ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL, 1479 &parse_error, fake_pmu, /*warn_if_reordered=*/false); 1480 if (ret) { 1481 parse_events_error__print(&parse_error, events.buf); 1482 goto err_out; 1483 } 1484 ret = decode_all_metric_ids(parsed_evlist, modifier); 1485 if (ret) 1486 goto err_out; 1487 1488 *out_evlist = parsed_evlist; 1489 parsed_evlist = NULL; 1490 err_out: 1491 parse_events_error__exit(&parse_error); 1492 evlist__delete(parsed_evlist); 1493 strbuf_release(&events); 1494 return ret; 1495 } 1496 1497 static int parse_groups(struct evlist *perf_evlist, 1498 const char *pmu, const char *str, 1499 bool metric_no_group, 1500 bool metric_no_merge, 1501 bool metric_no_threshold, 1502 const char *user_requested_cpu_list, 1503 bool system_wide, 1504 struct perf_pmu *fake_pmu, 1505 struct rblist *metric_events_list, 1506 const struct pmu_metrics_table *table) 1507 { 1508 struct evlist *combined_evlist = NULL; 1509 LIST_HEAD(metric_list); 1510 struct metric *m; 1511 bool tool_events[PERF_TOOL_MAX] = {false}; 1512 int ret; 1513 1514 if (metric_events_list->nr_entries == 0) 1515 metricgroup__rblist_init(metric_events_list); 1516 ret = metricgroup__add_metric_list(pmu, str, 
metric_no_group, metric_no_threshold, 1517 user_requested_cpu_list, 1518 system_wide, &metric_list, table); 1519 if (ret) 1520 goto out; 1521 1522 /* Sort metrics from largest to smallest. */ 1523 list_sort(NULL, &metric_list, metric_list_cmp); 1524 1525 if (!metric_no_merge) { 1526 struct expr_parse_ctx *combined = NULL; 1527 1528 find_tool_events(&metric_list, tool_events); 1529 1530 ret = build_combined_expr_ctx(&metric_list, &combined); 1531 1532 if (!ret && combined && hashmap__size(combined->ids)) { 1533 ret = parse_ids(metric_no_merge, fake_pmu, combined, 1534 /*modifier=*/NULL, 1535 /*group_events=*/false, 1536 tool_events, 1537 &combined_evlist); 1538 } 1539 if (combined) 1540 expr__ctx_free(combined); 1541 1542 if (ret) 1543 goto out; 1544 } 1545 1546 list_for_each_entry(m, &metric_list, nd) { 1547 struct metric_event *me; 1548 struct evsel **metric_events; 1549 struct evlist *metric_evlist = NULL; 1550 struct metric *n; 1551 struct metric_expr *expr; 1552 1553 if (combined_evlist && !m->group_events) { 1554 metric_evlist = combined_evlist; 1555 } else if (!metric_no_merge) { 1556 /* 1557 * See if the IDs for this metric are a subset of an 1558 * earlier metric. 
1559 */ 1560 list_for_each_entry(n, &metric_list, nd) { 1561 if (m == n) 1562 break; 1563 1564 if (n->evlist == NULL) 1565 continue; 1566 1567 if ((!m->modifier && n->modifier) || 1568 (m->modifier && !n->modifier) || 1569 (m->modifier && n->modifier && 1570 strcmp(m->modifier, n->modifier))) 1571 continue; 1572 1573 if ((!m->pmu && n->pmu) || 1574 (m->pmu && !n->pmu) || 1575 (m->pmu && n->pmu && strcmp(m->pmu, n->pmu))) 1576 continue; 1577 1578 if (expr__subset_of_ids(n->pctx, m->pctx)) { 1579 pr_debug("Events in '%s' fully contained within '%s'\n", 1580 m->metric_name, n->metric_name); 1581 metric_evlist = n->evlist; 1582 break; 1583 } 1584 1585 } 1586 } 1587 if (!metric_evlist) { 1588 ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier, 1589 m->group_events, tool_events, &m->evlist); 1590 if (ret) 1591 goto out; 1592 1593 metric_evlist = m->evlist; 1594 } 1595 ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids, 1596 metric_evlist, &metric_events); 1597 if (ret) { 1598 pr_err("Cannot resolve IDs for %s: %s\n", 1599 m->metric_name, m->metric_expr); 1600 goto out; 1601 } 1602 1603 me = metricgroup__lookup(metric_events_list, metric_events[0], true); 1604 1605 expr = malloc(sizeof(struct metric_expr)); 1606 if (!expr) { 1607 ret = -ENOMEM; 1608 free(metric_events); 1609 goto out; 1610 } 1611 1612 expr->metric_refs = m->metric_refs; 1613 m->metric_refs = NULL; 1614 expr->metric_expr = m->metric_expr; 1615 if (m->modifier) { 1616 char *tmp; 1617 1618 if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0) 1619 expr->metric_name = NULL; 1620 else 1621 expr->metric_name = tmp; 1622 } else 1623 expr->metric_name = strdup(m->metric_name); 1624 1625 if (!expr->metric_name) { 1626 ret = -ENOMEM; 1627 free(metric_events); 1628 goto out; 1629 } 1630 expr->metric_threshold = m->metric_threshold; 1631 expr->metric_unit = m->metric_unit; 1632 expr->metric_events = metric_events; 1633 expr->runtime = m->pctx->sctx.runtime; 1634 list_add(&expr->nd, 
&me->head); 1635 } 1636 1637 1638 if (combined_evlist) { 1639 evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries); 1640 evlist__delete(combined_evlist); 1641 } 1642 1643 list_for_each_entry(m, &metric_list, nd) { 1644 if (m->evlist) 1645 evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries); 1646 } 1647 1648 out: 1649 metricgroup__free_metrics(&metric_list); 1650 return ret; 1651 } 1652 1653 int metricgroup__parse_groups(struct evlist *perf_evlist, 1654 const char *pmu, 1655 const char *str, 1656 bool metric_no_group, 1657 bool metric_no_merge, 1658 bool metric_no_threshold, 1659 const char *user_requested_cpu_list, 1660 bool system_wide, 1661 struct rblist *metric_events) 1662 { 1663 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 1664 1665 if (!table) 1666 return -EINVAL; 1667 1668 return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge, 1669 metric_no_threshold, user_requested_cpu_list, system_wide, 1670 /*fake_pmu=*/NULL, metric_events, table); 1671 } 1672 1673 int metricgroup__parse_groups_test(struct evlist *evlist, 1674 const struct pmu_metrics_table *table, 1675 const char *str, 1676 struct rblist *metric_events) 1677 { 1678 return parse_groups(evlist, "all", str, 1679 /*metric_no_group=*/false, 1680 /*metric_no_merge=*/false, 1681 /*metric_no_threshold=*/false, 1682 /*user_requested_cpu_list=*/NULL, 1683 /*system_wide=*/false, 1684 &perf_pmu__fake, metric_events, table); 1685 } 1686 1687 struct metricgroup__has_metric_data { 1688 const char *pmu; 1689 const char *metric; 1690 }; 1691 static int metricgroup__has_metric_callback(const struct pmu_metric *pm, 1692 const struct pmu_metrics_table *table __maybe_unused, 1693 void *vdata) 1694 { 1695 struct metricgroup__has_metric_data *data = vdata; 1696 1697 return match_pm_metric(pm, data->pmu, data->metric) ? 
1 : 0; 1698 } 1699 1700 bool metricgroup__has_metric(const char *pmu, const char *metric) 1701 { 1702 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 1703 struct metricgroup__has_metric_data data = { 1704 .pmu = pmu, 1705 .metric = metric, 1706 }; 1707 1708 if (!table) 1709 return false; 1710 1711 return pmu_metrics_table_for_each_metric(table, metricgroup__has_metric_callback, &data) 1712 ? true : false; 1713 } 1714 1715 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm, 1716 const struct pmu_metrics_table *table __maybe_unused, 1717 void *data) 1718 { 1719 unsigned int *max_level = data; 1720 unsigned int level; 1721 const char *p = strstr(pm->metric_group ?: "", "TopdownL"); 1722 1723 if (!p || p[8] == '\0') 1724 return 0; 1725 1726 level = p[8] - '0'; 1727 if (level > *max_level) 1728 *max_level = level; 1729 1730 return 0; 1731 } 1732 1733 unsigned int metricgroups__topdown_max_level(void) 1734 { 1735 unsigned int max_level = 0; 1736 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 1737 1738 if (!table) 1739 return false; 1740 1741 pmu_metrics_table_for_each_metric(table, metricgroup__topdown_max_level_callback, 1742 &max_level); 1743 return max_level; 1744 } 1745 1746 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp, 1747 struct rblist *new_metric_events, 1748 struct rblist *old_metric_events) 1749 { 1750 unsigned int i; 1751 1752 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) { 1753 struct rb_node *nd; 1754 struct metric_event *old_me, *new_me; 1755 struct metric_expr *old_expr, *new_expr; 1756 struct evsel *evsel; 1757 size_t alloc_size; 1758 int idx, nr; 1759 1760 nd = rblist__entry(old_metric_events, i); 1761 old_me = container_of(nd, struct metric_event, nd); 1762 1763 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx); 1764 if (!evsel) 1765 return -EINVAL; 1766 new_me = metricgroup__lookup(new_metric_events, evsel, true); 1767 if 
(!new_me) 1768 return -ENOMEM; 1769 1770 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n", 1771 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx); 1772 1773 list_for_each_entry(old_expr, &old_me->head, nd) { 1774 new_expr = malloc(sizeof(*new_expr)); 1775 if (!new_expr) 1776 return -ENOMEM; 1777 1778 new_expr->metric_expr = old_expr->metric_expr; 1779 new_expr->metric_threshold = old_expr->metric_threshold; 1780 new_expr->metric_name = strdup(old_expr->metric_name); 1781 if (!new_expr->metric_name) 1782 return -ENOMEM; 1783 1784 new_expr->metric_unit = old_expr->metric_unit; 1785 new_expr->runtime = old_expr->runtime; 1786 1787 if (old_expr->metric_refs) { 1788 /* calculate number of metric_events */ 1789 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++) 1790 continue; 1791 alloc_size = sizeof(*new_expr->metric_refs); 1792 new_expr->metric_refs = calloc(nr + 1, alloc_size); 1793 if (!new_expr->metric_refs) { 1794 free(new_expr); 1795 return -ENOMEM; 1796 } 1797 1798 memcpy(new_expr->metric_refs, old_expr->metric_refs, 1799 nr * alloc_size); 1800 } else { 1801 new_expr->metric_refs = NULL; 1802 } 1803 1804 /* calculate number of metric_events */ 1805 for (nr = 0; old_expr->metric_events[nr]; nr++) 1806 continue; 1807 alloc_size = sizeof(*new_expr->metric_events); 1808 new_expr->metric_events = calloc(nr + 1, alloc_size); 1809 if (!new_expr->metric_events) { 1810 zfree(&new_expr->metric_refs); 1811 free(new_expr); 1812 return -ENOMEM; 1813 } 1814 1815 /* copy evsel in the same position */ 1816 for (idx = 0; idx < nr; idx++) { 1817 evsel = old_expr->metric_events[idx]; 1818 evsel = evlist__find_evsel(evlist, evsel->core.idx); 1819 if (evsel == NULL) { 1820 zfree(&new_expr->metric_events); 1821 zfree(&new_expr->metric_refs); 1822 free(new_expr); 1823 return -EINVAL; 1824 } 1825 new_expr->metric_events[idx] = evsel; 1826 } 1827 1828 list_add(&new_expr->nd, &new_me->head); 1829 } 1830 } 1831 return 0; 1832 } 1833