// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strlist.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
static int get_config_terms(struct list_head *head_config,
                            struct list_head *head_terms __maybe_unused);

struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = {
                .symbol = "cpu-cycles",
                .alias  = "cycles",
        },
        [PERF_COUNT_HW_INSTRUCTIONS] = {
                .symbol = "instructions",
                .alias  = "",
        },
        [PERF_COUNT_HW_CACHE_REFERENCES] = {
                .symbol = "cache-references",
                .alias  = "",
        },
        [PERF_COUNT_HW_CACHE_MISSES] = {
                .symbol = "cache-misses",
                .alias  = "",
        },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
                .symbol = "branch-instructions",
                .alias  = "branches",
        },
        [PERF_COUNT_HW_BRANCH_MISSES] = {
                .symbol = "branch-misses",
                .alias  = "",
        },
        [PERF_COUNT_HW_BUS_CYCLES] = {
                .symbol = "bus-cycles",
                .alias  = "",
        },
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
                .symbol = "stalled-cycles-frontend",
                .alias  = "idle-cycles-frontend",
        },
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
                .symbol = "stalled-cycles-backend",
                .alias  = "idle-cycles-backend",
        },
        [PERF_COUNT_HW_REF_CPU_CYCLES] = {
                .symbol = "ref-cycles",
                .alias  = "",
        },
};

struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
        [PERF_COUNT_SW_CPU_CLOCK] = {
                .symbol = "cpu-clock",
                .alias  = "",
        },
        [PERF_COUNT_SW_TASK_CLOCK] = {
                .symbol = "task-clock",
                .alias  = "",
        },
        [PERF_COUNT_SW_PAGE_FAULTS] = {
                .symbol = "page-faults",
                .alias  = "faults",
        },
        [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
                .symbol = "context-switches",
                .alias  = "cs",
        },
        [PERF_COUNT_SW_CPU_MIGRATIONS] = {
                .symbol = "cpu-migrations",
                .alias  = "migrations",
        },
        [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
                .symbol = "minor-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
                .symbol = "major-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
                .symbol = "alignment-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_EMULATION_FAULTS] = {
                .symbol = "emulation-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_DUMMY] = {
                .symbol = "dummy",
                .alias  = "",
        },
        [PERF_COUNT_SW_BPF_OUTPUT] = {
                .symbol = "bpf-output",
                .alias  = "",
        },
        [PERF_COUNT_SW_CGROUP_SWITCHES] = {
                .symbol = "cgroup-switches",
                .alias  = "",
        },
};

const char *event_type(int type)
{
        switch (type) {
        case PERF_TYPE_HARDWARE:
                return "hardware";

        case PERF_TYPE_SOFTWARE:
                return "software";

        case PERF_TYPE_TRACEPOINT:
                return "tracepoint";

        case PERF_TYPE_HW_CACHE:
                return "hardware-cache";

        default:
                break;
        }

        return "unknown";
}

static char *get_config_str(struct list_head *head_terms, int type_term)
{
        struct parse_events_term *term;

        if (!head_terms)
                return NULL;

        list_for_each_entry(term, head_terms, list)
                if (term->type_term == type_term)
                        return term->val.str;

        return NULL;
}

static char *get_config_metric_id(struct list_head *head_terms)
{
        return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(struct list_head *head_terms)
{
        return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
{
        struct parse_events_term *term;

        list_for_each_entry(term, config_terms, list) {
                u64 num;

                if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
                        continue;

                if (perf_pmu__have_event(pmu, term->val.str)) {
                        free(term->config);
                        term->config = term->val.str;
                        term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
                        term->val.num = 1;
                        term->no_value = true;
                        continue;
                }

                free(term->config);
                term->config = strdup("config");
                errno = 0;
                num = strtoull(term->val.str + 1, NULL, 16);
                assert(errno == 0);
                free(term->val.str);
                term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
                term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
                term->val.num = num;
                term->no_value = false;
        }
}
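
/*
 * Illustrative sketch (an editorial addition, not from the original file):
 * for an input such as "cpu/read/", the scanner hands fix_raw() a raw term
 * whose string value is "read". If the cpu PMU exposes an event named
 * "read", the term becomes the user event term read=1; otherwise the
 * leading 'r' is stripped and the remainder is reparsed as hex, which is
 * equivalent to config=0xead.
 */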

static struct evsel *
__add_event(struct list_head *list, int *idx,
            struct perf_event_attr *attr,
            bool init_attr,
            const char *name, const char *metric_id, struct perf_pmu *pmu,
            struct list_head *config_terms, bool auto_merge_stats,
            const char *cpu_list)
{
        struct evsel *evsel;
        struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
                               cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

        if (pmu)
                perf_pmu__warn_invalid_formats(pmu);

        if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
                perf_pmu__warn_invalid_config(pmu, attr->config, name,
                                              PERF_PMU_FORMAT_VALUE_CONFIG, "config");
                perf_pmu__warn_invalid_config(pmu, attr->config1, name,
                                              PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
                perf_pmu__warn_invalid_config(pmu, attr->config2, name,
                                              PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
                perf_pmu__warn_invalid_config(pmu, attr->config3, name,
                                              PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
        }
        if (init_attr)
                event_attr_init(attr);

        evsel = evsel__new_idx(attr, *idx);
        if (!evsel) {
                perf_cpu_map__put(cpus);
                return NULL;
        }

        (*idx)++;
        evsel->core.cpus = cpus;
        evsel->core.own_cpus = perf_cpu_map__get(cpus);
        evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
        evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
        evsel->auto_merge_stats = auto_merge_stats;
        evsel->pmu = pmu;
        evsel->pmu_name = pmu && pmu->name ? strdup(pmu->name) : NULL;

        if (name)
                evsel->name = strdup(name);

        if (metric_id)
                evsel->metric_id = strdup(metric_id);

        if (config_terms)
                list_splice_init(config_terms, &evsel->config_terms);

        if (list)
                list_add_tail(&evsel->core.node, list);

        return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
                                      const char *name, const char *metric_id,
                                      struct perf_pmu *pmu)
{
        return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
                           metric_id, pmu, /*config_terms=*/NULL,
                           /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
                     struct perf_event_attr *attr, const char *name,
                     const char *metric_id, struct list_head *config_terms)
{
        return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
                           /*pmu=*/NULL, config_terms,
                           /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
                          enum perf_tool_event tool_event)
{
        struct evsel *evsel;
        struct perf_event_attr attr = {
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_DUMMY,
        };

        evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
                            /*metric_id=*/NULL, /*pmu=*/NULL,
                            /*config_terms=*/NULL, /*auto_merge_stats=*/false,
                            /*cpu_list=*/"0");
        if (!evsel)
                return -ENOMEM;
        evsel->tool_event = tool_event;
        if (tool_event == PERF_TOOL_DURATION_TIME
            || tool_event == PERF_TOOL_USER_TIME
            || tool_event == PERF_TOOL_SYSTEM_TIME) {
                free((char *)evsel->unit);
                evsel->unit = strdup("ns");
        }
        return 0;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
                         int *longest)
{
        *longest = -1;
        for (int i = 0; i < size; i++) {
                for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
                        int n = strlen(names[i][j]);

                        if (n > *longest && !strncasecmp(str, names[i][j], n))
                                *longest = n;
                }
                if (*longest > 0)
                        return i;
        }

        return -1;
}
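
/*
 * Example (editorial, illustrative): parse_aliases("L1-dcache-load-misses",
 * evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len) matches the "L1-dcache"
 * alias of PERF_COUNT_HW_CACHE_L1D, the longest matching prefix, and sets
 * len to 9 so the caller can continue parsing after the following hyphen.
 */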

typedef int config_term_func_t(struct perf_event_attr *attr,
                               struct parse_events_term *term,
                               struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
                              struct parse_events_term *term,
                              struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
                       struct list_head *head,
                       struct parse_events_error *err,
                       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
        int len, cache_type = -1, cache_op = -1, cache_result = -1;
        const char *name_end = &name[strlen(name) + 1];
        const char *str = name;

        cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
        if (cache_type == -1)
                return -EINVAL;
        str += len + 1;

        if (str < name_end) {
                cache_op = parse_aliases(str, evsel__hw_cache_op,
                                         PERF_COUNT_HW_CACHE_OP_MAX, &len);
                if (cache_op >= 0) {
                        if (!evsel__is_cache_op_valid(cache_type, cache_op))
                                return -EINVAL;
                        str += len + 1;
                } else {
                        cache_result = parse_aliases(str, evsel__hw_cache_result,
                                                     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
                        if (cache_result >= 0)
                                str += len + 1;
                }
        }
        if (str < name_end) {
                if (cache_op < 0) {
                        cache_op = parse_aliases(str, evsel__hw_cache_op,
                                                 PERF_COUNT_HW_CACHE_OP_MAX, &len);
                        if (cache_op >= 0) {
                                if (!evsel__is_cache_op_valid(cache_type, cache_op))
                                        return -EINVAL;
                        }
                } else if (cache_result < 0) {
                        cache_result = parse_aliases(str, evsel__hw_cache_result,
                                                     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
                }
        }

        /*
         * Fall back to reads:
         */
        if (cache_op == -1)
                cache_op = PERF_COUNT_HW_CACHE_OP_READ;

        /*
         * Fall back to accesses:
         */
        if (cache_result == -1)
                cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

        *config = cache_type | (cache_op << 8) | (cache_result << 16);
        if (perf_pmus__supports_extended_type())
                *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
        return 0;
}
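
/*
 * Worked example (editorial, illustrative): decoding "L1-dcache-load-miss"
 * yields cache_type = PERF_COUNT_HW_CACHE_L1D (0), cache_op =
 * PERF_COUNT_HW_CACHE_OP_READ (0, via the "load" alias) and cache_result =
 * PERF_COUNT_HW_CACHE_RESULT_MISS (1), so *config = 0 | (0 << 8) |
 * (1 << 16) = 0x10000 before any extended PMU type is OR-ed into the
 * upper bits.
 */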

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
                              const struct perf_pmu *pmu)
{
        if (parse_state->pmu_filter == NULL)
                return false;

        if (pmu->name == NULL)
                return true;

        return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
                           struct parse_events_state *parse_state,
                           struct list_head *head_config)
{
        struct perf_pmu *pmu = NULL;
        bool found_supported = false;
        const char *config_name = get_config_name(head_config);
        const char *metric_id = get_config_metric_id(head_config);

        /* Legacy cache events are only supported by core PMUs. */
        while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                LIST_HEAD(config_terms);
                struct perf_event_attr attr;
                int ret;

                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_HW_CACHE;

                ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
                if (ret)
                        return ret;

                found_supported = true;

                if (head_config) {
                        if (config_attr(&attr, head_config, parse_state->error, config_term_common))
                                return -EINVAL;

                        if (get_config_terms(head_config, &config_terms))
                                return -ENOMEM;
                }

                if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
                                metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
                                /*cpu_list=*/NULL) == NULL)
                        return -ENOMEM;

                free_config_terms(&config_terms);
        }
        return found_supported ? 0 : -EINVAL;
}

#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
                             const char *sys, const char *name, int column)
{
        const char *str;
        char help[BUFSIZ];

        if (!e)
                return;

        /*
         * We get error directly from syscall errno ( > 0),
         * or from encoded pointer's error ( < 0).
         */
        err = abs(err);

        switch (err) {
        case EACCES:
                str = "can't access trace events";
                break;
        case ENOENT:
                str = "unknown tracepoint";
                break;
        default:
                str = "failed to add tracepoint";
                break;
        }

        tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
        parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct list_head *list, int *idx,
                          const char *sys_name, const char *evt_name,
                          struct parse_events_error *err,
                          struct list_head *head_config, void *loc_)
{
        YYLTYPE *loc = loc_;
        struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

        if (IS_ERR(evsel)) {
                tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
                return PTR_ERR(evsel);
        }

        if (head_config) {
                LIST_HEAD(config_terms);

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
                list_splice(&config_terms, &evsel->config_terms);
        }

        list_add_tail(&evsel->core.node, list);
        return 0;
}

static int add_tracepoint_multi_event(struct list_head *list, int *idx,
                                      const char *sys_name, const char *evt_name,
                                      struct parse_events_error *err,
                                      struct list_head *head_config, YYLTYPE *loc)
{
        char *evt_path;
        struct dirent *evt_ent;
        DIR *evt_dir;
        int ret = 0, found = 0;

        evt_path = get_events_file(sys_name);
        if (!evt_path) {
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }
        evt_dir = opendir(evt_path);
        if (!evt_dir) {
                put_events_file(evt_path);
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }

        while (!ret && (evt_ent = readdir(evt_dir))) {
                if (!strcmp(evt_ent->d_name, ".")
                    || !strcmp(evt_ent->d_name, "..")
                    || !strcmp(evt_ent->d_name, "enable")
                    || !strcmp(evt_ent->d_name, "filter"))
                        continue;

                if (!strglobmatch(evt_ent->d_name, evt_name))
                        continue;

                found++;

                ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
                                     err, head_config, loc);
        }

        if (!found) {
                tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
                ret = -1;
        }

        put_events_file(evt_path);
        closedir(evt_dir);
        return ret;
}

static int add_tracepoint_event(struct list_head *list, int *idx,
                                const char *sys_name, const char *evt_name,
                                struct parse_events_error *err,
                                struct list_head *head_config, YYLTYPE *loc)
{
        return strpbrk(evt_name, "*?") ?
                add_tracepoint_multi_event(list, idx, sys_name, evt_name,
                                           err, head_config, loc) :
                add_tracepoint(list, idx, sys_name, evt_name,
                               err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
                                    const char *sys_name, const char *evt_name,
                                    struct parse_events_error *err,
                                    struct list_head *head_config, YYLTYPE *loc)
{
        struct dirent *events_ent;
        DIR *events_dir;
        int ret = 0;

        events_dir = tracing_events__opendir();
        if (!events_dir) {
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }

        while (!ret && (events_ent = readdir(events_dir))) {
                if (!strcmp(events_ent->d_name, ".")
                    || !strcmp(events_ent->d_name, "..")
                    || !strcmp(events_ent->d_name, "enable")
                    || !strcmp(events_ent->d_name, "header_event")
                    || !strcmp(events_ent->d_name, "header_page"))
                        continue;

                if (!strglobmatch(events_ent->d_name, sys_name))
                        continue;

                ret = add_tracepoint_event(list, idx, events_ent->d_name,
                                           evt_name, err, head_config, loc);
        }

        closedir(events_dir);
        return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
        int i;

        for (i = 0; i < 3; i++) {
                if (!type || !type[i])
                        break;

#define CHECK_SET_TYPE(bit)                     \
do {                                            \
        if (attr->bp_type & bit)                \
                return -EINVAL;                 \
        else                                    \
                attr->bp_type |= bit;           \
} while (0)

                switch (type[i]) {
                case 'r':
                        CHECK_SET_TYPE(HW_BREAKPOINT_R);
                        break;
                case 'w':
                        CHECK_SET_TYPE(HW_BREAKPOINT_W);
                        break;
                case 'x':
                        CHECK_SET_TYPE(HW_BREAKPOINT_X);
                        break;
                default:
                        return -EINVAL;
                }
        }

#undef CHECK_SET_TYPE

        if (!attr->bp_type) /* Default */
                attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

        return 0;
}
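
/*
 * Example (editorial, illustrative): a breakpoint type of "rw" sets
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W, "x" sets HW_BREAKPOINT_X, and a
 * repeated flag such as "rr" fails with -EINVAL; a missing type falls
 * back to the read/write default above.
 */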

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
                                struct list_head *list,
                                u64 addr, char *type, u64 len,
                                struct list_head *head_config __maybe_unused)
{
        struct perf_event_attr attr;
        LIST_HEAD(config_terms);
        const char *name;

        memset(&attr, 0, sizeof(attr));
        attr.bp_addr = addr;

        if (parse_breakpoint_type(type, &attr))
                return -EINVAL;

        /* Provide some defaults if len is not specified */
        if (!len) {
                if (attr.bp_type == HW_BREAKPOINT_X)
                        len = sizeof(long);
                else
                        len = HW_BREAKPOINT_LEN_4;
        }

        attr.bp_len = len;

        attr.type = PERF_TYPE_BREAKPOINT;
        attr.sample_period = 1;

        if (head_config) {
                if (config_attr(&attr, head_config, parse_state->error,
                                config_term_common))
                        return -EINVAL;

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
        }

        name = get_config_name(head_config);

        return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
                         &config_terms);
}

static int check_type_val(struct parse_events_term *term,
                          struct parse_events_error *err,
                          int type)
{
        if (type == term->type_val)
                return 0;

        if (err) {
                parse_events_error__handle(err, term->err_val,
                                        type == PARSE_EVENTS__TERM_TYPE_NUM
                                        ? strdup("expected numeric value")
                                        : strdup("expected string value"),
                                        NULL);
        }
        return -EINVAL;
}

/*
 * Update according to parse-events.l
 */
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
        [PARSE_EVENTS__TERM_TYPE_USER]                  = "<sysfs term>",
        [PARSE_EVENTS__TERM_TYPE_CONFIG]                = "config",
        [PARSE_EVENTS__TERM_TYPE_CONFIG1]               = "config1",
        [PARSE_EVENTS__TERM_TYPE_CONFIG2]               = "config2",
        [PARSE_EVENTS__TERM_TYPE_CONFIG3]               = "config3",
        [PARSE_EVENTS__TERM_TYPE_NAME]                  = "name",
        [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]         = "period",
        [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]           = "freq",
        [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]    = "branch_type",
        [PARSE_EVENTS__TERM_TYPE_TIME]                  = "time",
        [PARSE_EVENTS__TERM_TYPE_CALLGRAPH]             = "call-graph",
        [PARSE_EVENTS__TERM_TYPE_STACKSIZE]             = "stack-size",
        [PARSE_EVENTS__TERM_TYPE_NOINHERIT]             = "no-inherit",
        [PARSE_EVENTS__TERM_TYPE_INHERIT]               = "inherit",
        [PARSE_EVENTS__TERM_TYPE_MAX_STACK]             = "max-stack",
        [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]            = "nr",
        [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
        [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
        [PARSE_EVENTS__TERM_TYPE_DRV_CFG]               = "driver-config",
        [PARSE_EVENTS__TERM_TYPE_PERCORE]               = "percore",
        [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]            = "aux-output",
        [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]       = "aux-sample-size",
        [PARSE_EVENTS__TERM_TYPE_METRIC_ID]             = "metric-id",
        [PARSE_EVENTS__TERM_TYPE_RAW]                   = "raw",
        [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]          = "legacy-cache",
        [PARSE_EVENTS__TERM_TYPE_HARDWARE]              = "hardware",
};

static bool config_term_shrinked;

static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
        char *err_str;

        if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
                parse_events_error__handle(err, -1,
                                        strdup("Invalid term_type"), NULL);
                return false;
        }
        if (!config_term_shrinked)
                return true;

        switch (term_type) {
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
        case PARSE_EVENTS__TERM_TYPE_CONFIG1:
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
        case PARSE_EVENTS__TERM_TYPE_CONFIG3:
        case PARSE_EVENTS__TERM_TYPE_NAME:
        case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
        case PARSE_EVENTS__TERM_TYPE_PERCORE:
                return true;
        default:
                if (!err)
                        return false;

                /* term_type is validated so indexing is safe */
                if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
                             config_term_names[term_type]) >= 0)
                        parse_events_error__handle(err, -1, err_str, NULL);
                return false;
        }
}

void parse_events__shrink_config_terms(void)
{
        config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
                              struct parse_events_term *term,
                              struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)                                              \
do {                                                                      \
        if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
                return -EINVAL;                                           \
} while (0)

        switch (term->type_term) {
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
                CHECK_TYPE_VAL(NUM);
                attr->config = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG1:
                CHECK_TYPE_VAL(NUM);
                attr->config1 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
                CHECK_TYPE_VAL(NUM);
                attr->config2 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG3:
                CHECK_TYPE_VAL(NUM);
                attr->config3 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
                CHECK_TYPE_VAL(STR);
                if (strcmp(term->val.str, "no") &&
                    parse_branch_str(term->val.str,
                                     &attr->branch_sample_type)) {
                        parse_events_error__handle(err, term->err_val,
                                        strdup("invalid branch sample type"),
                                        NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_TIME:
                CHECK_TYPE_VAL(NUM);
                if (term->val.num > 1) {
                        parse_events_error__handle(err, term->err_val,
                                                strdup("expected 0 or 1"),
                                                NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NAME:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_RAW:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_PERCORE:
                CHECK_TYPE_VAL(NUM);
                if ((unsigned int)term->val.num > 1) {
                        parse_events_error__handle(err, term->err_val,
                                                strdup("expected 0 or 1"),
                                                NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                CHECK_TYPE_VAL(NUM);
                if (term->val.num > UINT_MAX) {
                        parse_events_error__handle(err, term->err_val,
                                                strdup("too big"),
                                                NULL);
                        return -EINVAL;
                }
                break;
        default:
                parse_events_error__handle(err, term->err_term,
                                strdup("unknown term"),
                                parse_events_formats_error_string(NULL));
                return -EINVAL;
        }

        /*
         * Check term availability after basic checking so
         * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
         *
         * If we checked availability at the entry of this function,
         * the user would see "'<sysfs term>' is not usable in 'perf stat'"
         * whenever an invalid config term is provided for a legacy event
         * (for example, instructions/badterm/...), which is confusing.
         */
        if (!config_term_avail(term->type_term, err))
                return -EINVAL;
        return 0;
#undef CHECK_TYPE_VAL
}
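
/*
 * Example (editorial, illustrative): for "cycles/period=100000,name=total/"
 * the period term passes CHECK_TYPE_VAL(NUM) above and is later turned into
 * an evsel config term by get_config_terms(), while name= must be a string
 * and becomes the event name via get_config_name().
 */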

static int config_term_pmu(struct perf_event_attr *attr,
                           struct parse_events_term *term,
                           struct parse_events_error *err)
{
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
                const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

                if (!pmu) {
                        char *err_str;

                        if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
                                parse_events_error__handle(err, term->err_term,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
                if (perf_pmu__supports_legacy_cache(pmu)) {
                        attr->type = PERF_TYPE_HW_CACHE;
                        return parse_events__decode_legacy_cache(term->config, pmu->type,
                                                                 &attr->config);
                } else
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
                const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

                if (!pmu) {
                        char *err_str;

                        if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
                                parse_events_error__handle(err, term->err_term,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
                attr->type = PERF_TYPE_HARDWARE;
                attr->config = term->val.num;
                if (perf_pmus__supports_extended_type())
                        attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
                return 0;
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
            term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
                /*
                 * Always succeed for sysfs terms, as we don't know
                 * at this point what type they need to have.
                 */
                return 0;
        }
        return config_term_common(attr, term, err);
}

#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
                                  struct parse_events_term *term,
                                  struct parse_events_error *err)
{
        switch (term->type_term) {
        case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
        case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                return config_term_common(attr, term, err);
        default:
                if (err) {
                        parse_events_error__handle(err, term->err_term,
                                strdup("unknown term"),
                                strdup("valid terms: call-graph,stack-size\n"));
                }
                return -EINVAL;
        }

        return 0;
}
#endif

static int config_attr(struct perf_event_attr *attr,
                       struct list_head *head,
                       struct parse_events_error *err,
                       config_term_func_t config_term)
{
        struct parse_events_term *term;

        list_for_each_entry(term, head, list)
                if (config_term(attr, term, err))
                        return -EINVAL;

        return 0;
}

static int get_config_terms(struct list_head *head_config,
                            struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak)                         \
        struct evsel_config_term *__t;                          \
                                                                \
        __t = zalloc(sizeof(*__t));                             \
        if (!__t)                                               \
                return -ENOMEM;                                 \
                                                                \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type = EVSEL__CONFIG_TERM_ ## __type;              \
        __t->weak = __weak;                                     \
        list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)      \
do {                                                            \
        ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.__name = __val;                                \
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)              \
do {                                                            \
        ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.str = strdup(__val);                           \
        if (!__t->val.str) {                                    \
                zfree(&__t);                                    \
                return -ENOMEM;                                 \
        }                                                       \
        __t->free_str = true;                                   \
} while (0)

        struct parse_events_term *term;

        list_for_each_entry(term, head_config, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
                        ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
                        ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_TIME:
                        ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
                        ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
                        ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                        ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_INHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
                                            term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                        ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                        ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
                                            term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
                        ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_PERCORE:
                        ADD_CONFIG_TERM_VAL(PERCORE, percore,
                                            term->val.num ? true : false, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                        ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                        ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
                                            term->val.num, term->weak);
                        break;
                default:
                        break;
                }
        }
        return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
                           struct list_head *head_terms)
{
        struct parse_events_term *term;
        u64 bits = 0;
        int type;

        list_for_each_entry(term, head_config, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_USER:
                        type = perf_pmu__format_type(pmu, term->config);
                        if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
                                continue;
                        bits |= perf_pmu__format_bits(pmu, term->config);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CONFIG:
                        bits = ~(u64)0;
                        break;
                default:
                        break;
                }
        }

        if (bits)
                ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
        return 0;
}

int parse_events_add_tracepoint(struct list_head *list, int *idx,
                                const char *sys, const char *event,
                                struct parse_events_error *err,
                                struct list_head *head_config, void *loc_)
{
        YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
        if (head_config) {
                struct perf_event_attr attr;

                if (config_attr(&attr, head_config, err,
                                config_term_tracepoint))
                        return -EINVAL;
        }

        if (strpbrk(sys, "*?"))
                return add_tracepoint_multi_sys(list, idx, sys, event,
                                                err, head_config, loc);
        else
                return add_tracepoint_event(list, idx, sys, event,
                                            err, head_config, loc);
#else
        (void)list;
        (void)idx;
        (void)sys;
        (void)event;
        (void)head_config;
        parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
                                   strdup("libtraceevent is necessary for tracepoint support"));
        return -1;
#endif
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
                                      struct list_head *list,
                                      struct perf_pmu *pmu, u32 type, u32 extended_type,
                                      u64 config, struct list_head *head_config)
{
        struct perf_event_attr attr;
        LIST_HEAD(config_terms);
        const char *name, *metric_id;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.type = type;
        attr.config = config;
        if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
                assert(perf_pmus__supports_extended_type());
                attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
        }

        if (head_config) {
                if (config_attr(&attr, head_config, parse_state->error,
                                config_term_common))
                        return -EINVAL;

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
        }

        name = get_config_name(head_config);
        metric_id = get_config_metric_id(head_config);
        ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
                          metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
                          /*cpu_list=*/NULL) ? 0 : -ENOMEM;
        free_config_terms(&config_terms);
        return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
                             struct list_head *list,
                             u32 type, u64 config,
                             struct list_head *head_config,
                             bool wildcard)
{
        struct perf_pmu *pmu = NULL;
        bool found_supported = false;

        /* Wildcards on numeric values are only supported by core PMUs. */
        if (wildcard && perf_pmus__supports_extended_type()) {
                while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                        int ret;

                        found_supported = true;
                        if (parse_events__filter_pmu(parse_state, pmu))
                                continue;

                        ret = __parse_events_add_numeric(parse_state, list, pmu,
                                                         type, pmu->type,
                                                         config, head_config);
                        if (ret)
                                return ret;
                }
                if (found_supported)
                        return 0;
        }
        return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
                                          type, /*extended_type=*/0, config, head_config);
}
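
/*
 * Example (editorial, illustrative): on a hybrid x86 system with cpu_core
 * and cpu_atom PMUs, parsing "cycles" takes the wildcard path above and
 * produces one event per core PMU, each with that PMU's type encoded into
 * the upper bits of attr.config via PERF_PMU_TYPE_SHIFT.
 */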

int parse_events_add_tool(struct parse_events_state *parse_state,
                          struct list_head *list,
                          int tool_event)
{
        return add_event_tool(list, &parse_state->idx, tool_event);
}

static bool config_term_percore(struct list_head *config_terms)
{
        struct evsel_config_term *term;

        list_for_each_entry(term, config_terms, list) {
                if (term->type == EVSEL__CONFIG_TERM_PERCORE)
                        return term->val.percore;
        }

        return false;
}

int parse_events_add_pmu(struct parse_events_state *parse_state,
                         struct list_head *list, char *name,
                         struct list_head *head_config,
                         bool auto_merge_stats, void *loc_)
{
        struct perf_event_attr attr;
        struct perf_pmu_info info;
        struct perf_pmu *pmu;
        struct evsel *evsel;
        struct parse_events_error *err = parse_state->error;
        YYLTYPE *loc = loc_;
        LIST_HEAD(config_terms);

        pmu = parse_state->fake_pmu ?: perf_pmus__find(name);

        if (verbose > 1 && !(pmu && pmu->selectable)) {
                fprintf(stderr, "Attempting to add event pmu '%s' with '",
                        name);
                if (head_config) {
                        struct parse_events_term *term;

                        list_for_each_entry(term, head_config, list) {
                                fprintf(stderr, "%s,", term->config);
                        }
                }
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }

        if (!pmu) {
                char *err_str;

                if (asprintf(&err_str,
                             "Cannot find PMU `%s'. Missing kernel support?",
                             name) >= 0)
                        parse_events_error__handle(err, loc->first_column, err_str, NULL);
                return -EINVAL;
        }
        if (head_config)
                fix_raw(head_config, pmu);

        if (pmu->default_config) {
                memcpy(&attr, pmu->default_config,
                       sizeof(struct perf_event_attr));
        } else {
                memset(&attr, 0, sizeof(attr));
        }
        attr.type = pmu->type;

        if (!head_config) {
                evsel = __add_event(list, &parse_state->idx, &attr,
                                    /*init_attr=*/true, /*name=*/NULL,
                                    /*metric_id=*/NULL, pmu,
                                    /*config_terms=*/NULL, auto_merge_stats,
                                    /*cpu_list=*/NULL);
                return evsel ? 0 : -ENOMEM;
        }

        if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info, err))
                return -EINVAL;

        if (verbose > 1) {
                fprintf(stderr, "After aliases, add event pmu '%s' with '",
                        name);
                if (head_config) {
                        struct parse_events_term *term;

                        list_for_each_entry(term, head_config, list) {
                                fprintf(stderr, "%s,", term->config);
                        }
                }
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }

        /*
         * Configure hardcoded terms first, no need to check
         * return value when called with fail == 0 ;)
         */
        if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
                return -EINVAL;

        if (get_config_terms(head_config, &config_terms))
                return -ENOMEM;

        /*
         * When using default config, record which bits of attr->config were
         * changed by the user.
         */
        if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
                return -ENOMEM;

        if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
                free_config_terms(&config_terms);
                return -EINVAL;
        }

        evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
                            get_config_name(head_config),
                            get_config_metric_id(head_config), pmu,
                            &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
        if (!evsel)
                return -ENOMEM;

        if (evsel->name)
                evsel->use_config_name = true;

        evsel->percore = config_term_percore(&evsel->config_terms);

        if (parse_state->fake_pmu)
                return 0;

        free((char *)evsel->unit);
        evsel->unit = strdup(info.unit);
        evsel->scale = info.scale;
        evsel->per_pkg = info.per_pkg;
        evsel->snapshot = info.snapshot;
        return 0;
}
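
/*
 * Example (editorial, illustrative): for "uncore_imc/cas_count_read/" the
 * alias is expanded by perf_pmu__check_alias(), which may also supply a
 * unit and scale, and the remaining terms are encoded into attr.config by
 * perf_pmu__config() before the evsel is created.
 */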

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
                               char *str, struct list_head *head,
                               struct list_head **listp, void *loc_)
{
        struct parse_events_term *term;
        struct list_head *list = NULL;
        struct list_head *orig_head = NULL;
        struct perf_pmu *pmu = NULL;
        YYLTYPE *loc = loc_;
        int ok = 0;
        char *config;

        *listp = NULL;

        if (!head) {
                head = malloc(sizeof(struct list_head));
                if (!head)
                        goto out_err;

                INIT_LIST_HEAD(head);
        }
        config = strdup(str);
        if (!config)
                goto out_err;

        if (parse_events_term__num(&term,
                                   PARSE_EVENTS__TERM_TYPE_USER,
                                   config, 1, false, NULL,
                                   NULL) < 0) {
                free(config);
                goto out_err;
        }
        list_add_tail(&term->list, head);

        /* Add it for all PMUs that support the alias */
        list = malloc(sizeof(struct list_head));
        if (!list)
                goto out_err;

        INIT_LIST_HEAD(list);

        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
                bool auto_merge_stats;

                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;

                if (!perf_pmu__have_event(pmu, str))
                        continue;

                auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
                parse_events_copy_term_list(head, &orig_head);
                if (!parse_events_add_pmu(parse_state, list, pmu->name,
                                          orig_head, auto_merge_stats, loc)) {
                        pr_debug("%s -> %s/%s/\n", str, pmu->name, str);
                        ok++;
                }
                parse_events_terms__delete(orig_head);
        }

        if (parse_state->fake_pmu) {
                if (!parse_events_add_pmu(parse_state, list, str, head,
                                          /*auto_merge_stats=*/true, loc)) {
                        pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
                        ok++;
                }
        }

out_err:
        if (ok)
                *listp = list;
        else
                free(list);

        parse_events_terms__delete(head);
        return ok ? 0 : -1;
}
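
/*
 * Example (editorial, illustrative): an unqualified alias such as
 * "inst_retired.any" is offered to every PMU that has it; on a hybrid
 * system this can create one evsel per matching PMU, each equivalent to
 * spelling out "<pmu_name>/inst_retired.any/".
 */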

int parse_events__modifier_group(struct list_head *list,
                                 char *event_mod)
{
        return parse_events__modifier_event(list, event_mod, true);
}

void parse_events__set_leader(char *name, struct list_head *list)
{
        struct evsel *leader;

        if (list_empty(list)) {
                WARN_ONCE(true, "WARNING: failed to set leader: empty list");
                return;
        }

        leader = list_first_entry(list, struct evsel, core.node);
        __perf_evlist__set_leader(list, &leader->core);
        leader->group_name = name;
}

/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
                               struct list_head *list_all)
{
        /*
         * Called for single event definition. Update the
         * 'all event' list, and reinit the 'single event'
         * list, for next event definition.
         */
        list_splice_tail(list_event, list_all);
        free(list_event);
}

struct event_modifier {
        int eu;
        int ek;
        int eh;
        int eH;
        int eG;
        int eI;
        int precise;
        int precise_max;
        int exclude_GH;
        int sample_read;
        int pinned;
        int weak;
        int exclusive;
        int bpf_counter;
};

static int get_event_modifier(struct event_modifier *mod, char *str,
                              struct evsel *evsel)
{
        int eu = evsel ? evsel->core.attr.exclude_user : 0;
        int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
        int eh = evsel ? evsel->core.attr.exclude_hv : 0;
        int eH = evsel ? evsel->core.attr.exclude_host : 0;
        int eG = evsel ? evsel->core.attr.exclude_guest : 0;
        int eI = evsel ? evsel->core.attr.exclude_idle : 0;
        int precise = evsel ? evsel->core.attr.precise_ip : 0;
        int precise_max = 0;
        int sample_read = 0;
        int pinned = evsel ? evsel->core.attr.pinned : 0;
        int exclusive = evsel ? evsel->core.attr.exclusive : 0;

        int exclude = eu | ek | eh;
        int exclude_GH = evsel ? evsel->exclude_GH : 0;
        int weak = 0;
        int bpf_counter = 0;

        memset(mod, 0, sizeof(*mod));

        while (*str) {
                if (*str == 'u') {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        if (!exclude_GH && !perf_guest)
                                eG = 1;
                        eu = 0;
                } else if (*str == 'k') {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        ek = 0;
                } else if (*str == 'h') {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        eh = 0;
                } else if (*str == 'G') {
                        if (!exclude_GH)
                                exclude_GH = eG = eH = 1;
                        eG = 0;
                } else if (*str == 'H') {
                        if (!exclude_GH)
                                exclude_GH = eG = eH = 1;
                        eH = 0;
                } else if (*str == 'I') {
                        eI = 1;
                } else if (*str == 'p') {
                        precise++;
                        /* use of precise requires exclude_guest */
                        if (!exclude_GH)
                                eG = 1;
                } else if (*str == 'P') {
                        precise_max = 1;
                } else if (*str == 'S') {
                        sample_read = 1;
                } else if (*str == 'D') {
                        pinned = 1;
                } else if (*str == 'e') {
                        exclusive = 1;
                } else if (*str == 'W') {
                        weak = 1;
                } else if (*str == 'b') {
                        bpf_counter = 1;
                } else
                        break;

                ++str;
        }

        /*
         * precise ip:
         *
         *  0 - SAMPLE_IP can have arbitrary skid
         *  1 - SAMPLE_IP must have constant skid
         *  2 - SAMPLE_IP requested to have 0 skid
         *  3 - SAMPLE_IP must have 0 skid
         *
         * See also PERF_RECORD_MISC_EXACT_IP
         */
        if (precise > 3)
                return -EINVAL;

        mod->eu = eu;
        mod->ek = ek;
        mod->eh = eh;
        mod->eH = eH;
        mod->eG = eG;
        mod->eI = eI;
        mod->precise = precise;
        mod->precise_max = precise_max;
        mod->exclude_GH = exclude_GH;
        mod->sample_read = sample_read;
        mod->pinned = pinned;
        mod->weak = weak;
        mod->bpf_counter = bpf_counter;
        mod->exclusive = exclusive;

        return 0;
}

/*
 * Basic modifier sanity check to validate it contains only one
 * instance of any modifier (apart from 'p') present.
 */
static int check_modifier(char *str)
{
        char *p = str;

        /* The sizeof includes 0 byte as well. */
        if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
                return -1;

        while (*p) {
                if (*p != 'p' && strchr(p + 1, *p))
                        return -1;
                p++;
        }

        return 0;
}
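
/*
 * Example (editorial, illustrative): "instructions:u" counts user space
 * only, while "cycles:kppp" counts kernel space with precise_ip == 3
 * (zero skid required). check_modifier() rejects strings such as "uu"
 * that repeat any modifier other than 'p'.
 */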

int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
        struct evsel *evsel;
        struct event_modifier mod;

        if (str == NULL)
                return 0;

        if (check_modifier(str))
                return -EINVAL;

        if (!add && get_event_modifier(&mod, str, NULL))
                return -EINVAL;

        __evlist__for_each_entry(list, evsel) {
                if (add && get_event_modifier(&mod, str, evsel))
                        return -EINVAL;

                evsel->core.attr.exclude_user   = mod.eu;
                evsel->core.attr.exclude_kernel = mod.ek;
                evsel->core.attr.exclude_hv     = mod.eh;
                evsel->core.attr.precise_ip     = mod.precise;
                evsel->core.attr.exclude_host   = mod.eH;
                evsel->core.attr.exclude_guest  = mod.eG;
                evsel->core.attr.exclude_idle   = mod.eI;
                evsel->exclude_GH               = mod.exclude_GH;
                evsel->sample_read              = mod.sample_read;
                evsel->precise_max              = mod.precise_max;
                evsel->weak_group               = mod.weak;
                evsel->bpf_counter              = mod.bpf_counter;

                if (evsel__is_group_leader(evsel)) {
                        evsel->core.attr.pinned    = mod.pinned;
                        evsel->core.attr.exclusive = mod.exclusive;
                }
        }

        return 0;
}

int parse_events_name(struct list_head *list, const char *name)
{
        struct evsel *evsel;

        __evlist__for_each_entry(list, evsel) {
                if (!evsel->name) {
                        evsel->name = strdup(name);
                        if (!evsel->name)
                                return -ENOMEM;
                }
        }

        return 0;
}

static int parse_events__scanner(const char *str,
                                 struct parse_events_state *parse_state)
{
        YY_BUFFER_STATE buffer;
        void *scanner;
        int ret;

        ret = parse_events_lex_init_extra(parse_state, &scanner);
        if (ret)
                return ret;

        buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
        parse_events_debug = 1;
        parse_events_set_debug(1, scanner);
#endif
        ret = parse_events_parse(parse_state, scanner);

        parse_events__flush_buffer(buffer, scanner);
        parse_events__delete_buffer(buffer, scanner);
        parse_events_lex_destroy(scanner);
        return ret;
}

/*
 * parse event config string, return a list of event terms.
 */
int parse_events_terms(struct list_head *terms, const char *str)
{
        struct parse_events_state parse_state = {
                .terms  = NULL,
                .stoken = PE_START_TERMS,
        };
        int ret;

        ret = parse_events__scanner(str, &parse_state);

        if (!ret) {
                list_splice(parse_state.terms, terms);
                zfree(&parse_state.terms);
                return 0;
        }

        parse_events_terms__delete(parse_state.terms);
        return ret;
}
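
/*
 * Example (editorial, illustrative): parse_events_terms(&terms,
 * "config=0x1,period=1000") splices two numeric terms onto "terms", which
 * callers can then hand to the PMU configuration code; freeing the terms
 * afterwards is the caller's responsibility.
 */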

static int evsel__compute_group_pmu_name(struct evsel *evsel,
                                         const struct list_head *head)
{
        struct evsel *leader = evsel__leader(evsel);
        struct evsel *pos;
        const char *group_pmu_name;
        struct perf_pmu *pmu = evsel__find_pmu(evsel);

        if (!pmu) {
                /*
                 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
                 * is a core PMU, but in heterogeneous systems this is
                 * unknown. For now pick the first core PMU.
                 */
                pmu = perf_pmus__scan_core(NULL);
        }
        if (!pmu) {
                pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
                return -EINVAL;
        }
        group_pmu_name = pmu->name;
        /*
         * Software events may be in a group with other uncore PMU events. Use
         * the pmu_name of the first non-software event to avoid breaking the
         * software event out of the group.
         *
         * Aux event leaders, like intel_pt, expect a group with events from
         * other PMUs, so substitute the AUX event's PMU in this case.
         */
        if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
                struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

                if (!leader_pmu) {
                        /* As with determining pmu above. */
                        leader_pmu = perf_pmus__scan_core(NULL);
                }
                /*
                 * Starting with the leader, find the first event with a named
                 * non-software PMU. for_each_group_(member|evsel) isn't used as
                 * the list isn't yet sorted putting evsel's in the same group
                 * together.
                 */
                if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
                        group_pmu_name = leader_pmu->name;
                } else if (leader->core.nr_members > 1) {
                        list_for_each_entry(pos, head, core.node) {
                                struct perf_pmu *pos_pmu;

                                if (pos == leader || evsel__leader(pos) != leader)
                                        continue;
                                pos_pmu = evsel__find_pmu(pos);
                                if (!pos_pmu) {
                                        /* As with determining pmu above. */
                                        pos_pmu = perf_pmus__scan_core(NULL);
                                }
                                if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
                                        group_pmu_name = pos_pmu->name;
                                        break;
                                }
                        }
                }
        }
        /* Assign the actual name taking care that the fake PMU lacks a name. */
        evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
        return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
        /* Order by insertion index. */
        return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
        const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
        const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
        const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
        const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
        int *force_grouped_idx = _fg_idx;
        int lhs_sort_idx, rhs_sort_idx, ret;
        const char *lhs_pmu_name, *rhs_pmu_name;
        bool lhs_has_group, rhs_has_group;

        /*
         * First sort by grouping/leader. Read the leader idx only if the evsel
         * is part of a group, by default ungrouped events will be sorted
         * relative to grouped events based on where the first ungrouped event
         * occurs. If both events don't have a group we want to fall-through to
         * the arch specific sorting, that can reorder and fix things like
         * Intel's topdown events.
         */
        if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
                lhs_has_group = true;
                lhs_sort_idx = lhs_core->leader->idx;
        } else {
                lhs_has_group = false;
                lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
                        ? *force_grouped_idx
                        : lhs_core->idx;
        }
        if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
                rhs_has_group = true;
                rhs_sort_idx = rhs_core->leader->idx;
        } else {
                rhs_has_group = false;
                rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
                        ? *force_grouped_idx
                        : rhs_core->idx;
        }

        if (lhs_sort_idx != rhs_sort_idx)
                return lhs_sort_idx - rhs_sort_idx;

        /* Group by PMU if there is a group. Groups can't span PMUs. */
        if (lhs_has_group && rhs_has_group) {
                lhs_pmu_name = lhs->group_pmu_name;
                rhs_pmu_name = rhs->group_pmu_name;
                ret = strcmp(lhs_pmu_name, rhs_pmu_name);
                if (ret)
                        return ret;
        }

        /* Architecture specific sorting. */
        return arch_evlist__cmp(lhs, rhs);
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
        int idx = 0, force_grouped_idx = -1;
        struct evsel *pos, *cur_leader = NULL;
        struct perf_evsel *cur_leaders_grp = NULL;
        bool idx_changed = false, cur_leader_force_grouped = false;
        int orig_num_leaders = 0, num_leaders = 0;
        int ret;

        /*
         * Compute index to insert ungrouped events at. Place them where the
         * first ungrouped event appears.
         */
        list_for_each_entry(pos, list, core.node) {
                const struct evsel *pos_leader = evsel__leader(pos);

                ret = evsel__compute_group_pmu_name(pos, list);
                if (ret)
                        return ret;

                if (pos == pos_leader)
                        orig_num_leaders++;

                /*
                 * Ensure indexes are sequential, in particular for multiple
                 * event lists being merged. The indexes are used to detect when
                 * the user order is modified.
                 */
                pos->core.idx = idx++;

                /* Remember an index to sort all forced grouped events together to. */
                if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
                    arch_evsel__must_be_in_group(pos))
                        force_grouped_idx = pos->core.idx;
        }

        /* Sort events. */
        list_sort(&force_grouped_idx, list, evlist__cmp);

        /*
         * Recompute groups, splitting for PMUs and adding groups for events
         * that require them.
         */
        idx = 0;
        list_for_each_entry(pos, list, core.node) {
                const struct evsel *pos_leader = evsel__leader(pos);
                const char *pos_pmu_name = pos->group_pmu_name;
                const char *cur_leader_pmu_name;
                bool pos_force_grouped = force_grouped_idx != -1 &&
                                         arch_evsel__must_be_in_group(pos);

                /* Reset index and nr_members. */
                if (pos->core.idx != idx)
                        idx_changed = true;
                pos->core.idx = idx++;
                pos->core.nr_members = 0;

                /*
                 * Set the group leader respecting the given groupings and that
                 * groups can't span PMUs.
                 */
                if (!cur_leader)
                        cur_leader = pos;

                cur_leader_pmu_name = cur_leader->group_pmu_name;
                if ((cur_leaders_grp != pos->core.leader &&
                     (!pos_force_grouped || !cur_leader_force_grouped)) ||
                    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
                        /* Event is for a different group/PMU than last. */
                        cur_leader = pos;
                        /*
                         * Remember the leader's group before it is overwritten,
                         * so that later events match as being in the same
                         * group.
                         */
                        cur_leaders_grp = pos->core.leader;
                        /*
                         * Avoid forcing events into groups with events that
                         * don't need to be in the group.
                         */
                        cur_leader_force_grouped = pos_force_grouped;
                }
                if (pos_leader != cur_leader) {
                        /* The leader changed so update it. */
                        evsel__set_leader(pos, cur_leader);
                }
        }
        list_for_each_entry(pos, list, core.node) {
                struct evsel *pos_leader = evsel__leader(pos);

                if (pos == pos_leader)
                        num_leaders++;
                pos_leader->core.nr_members++;
        }
        return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
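
/*
 * Example (editorial, illustrative): given "{cycles,data_read}" where
 * data_read is an uncore event, a group cannot span PMUs, so the sort
 * above splits it into a core group and an uncore group and the caller
 * below warns that events were regrouped.
 */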
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false, cur_leader_force_grouped = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/* Remember an index to sort all forced grouped events together to. */
		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
		    arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader)
			cur_leader = pos;

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if ((cur_leaders_grp != pos->core.leader &&
		     (!pos_force_grouped || !cur_leader_force_grouped)) ||
		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* Event is for a different group/PMU than last. */
			cur_leader = pos;
			/*
			 * Remember the leader's group before it is overwritten,
			 * so that later events match as being in the same
			 * group.
			 */
			cur_leaders_grp = pos->core.leader;
			/*
			 * Avoid forcing events into groups with events that
			 * don't need to be in the group.
			 */
			cur_leader_force_grouped = pos_force_grouped;
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, struct perf_pmu *fake_pmu,
		   bool warn_if_reordered)
{
	struct parse_events_state parse_state = {
		.list	    = LIST_HEAD_INIT(parse_state.list),
		.idx	    = evlist->core.nr_entries,
		.error	    = err,
		.stoken	    = PE_START_EVENTS,
		.fake_pmu   = fake_pmu,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
		pr_warning("WARNING: events were regrouped to match PMUs\n");

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

void parse_events_error__init(struct parse_events_error *err)
{
	bzero(err, sizeof(*err));
}

void parse_events_error__exit(struct parse_events_error *err)
{
	zfree(&err->str);
	zfree(&err->help);
	zfree(&err->first_str);
	zfree(&err->first_help);
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;
	switch (err->num_errors) {
	case 0:
		err->idx = idx;
		err->str = str;
		err->help = help;
		break;
	case 1:
		err->first_idx = err->idx;
		err->idx = idx;
		err->first_str = err->str;
		err->str = str;
		err->first_help = err->help;
		err->help = help;
		break;
	default:
		pr_debug("Multiple errors dropping message: %s (%s)\n",
			err->str, err->help);
		free(err->str);
		err->str = str;
		free(err->help);
		err->help = help;
		break;
	}
	err->num_errors++;
	return;

out_free:
	free(str);
	free(help);
}
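/*
 * Illustrative sketch of the bookkeeping above: after
 *
 *	parse_events_error__handle(err, 0, strdup("first"), NULL);
 *	parse_events_error__handle(err, 5, strdup("second"), NULL);
 *	parse_events_error__handle(err, 9, strdup("third"), NULL);
 *
 * err->first_str is "first" and err->str is "third"; "second" was logged
 * with pr_debug() and freed, so only the initial and most recent errors
 * survive for parse_events_error__print() below.
 */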
#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(struct parse_events_error *err,
			       const char *event)
{
	if (!err->num_errors)
		return;

	__parse_events_error__print(err->idx, err->str, err->help, event);

	if (err->num_errors > 1) {
		fputs("\nInitial error:\n", stderr);
		__parse_events_error__print(err->first_idx, err->first_str,
					    err->first_help, event);
	}
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/NULL, /*warn_if_reordered=*/true);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}
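/*
 * Illustrative usage sketch (variable names are placeholders): a builtin
 * command typically wires the callback above to its -e option, e.g.
 *
 *	struct parse_events_option_args args = { .evlistp = &evlist };
 *
 *	OPT_CALLBACK('e', "event", &args, "event",
 *		     "event selector. use 'perf list' to list available events",
 *		     parse_events_option),
 *
 * so that each -e occurrence appends its parsed events to *args.evlistp.
 */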
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty; give func a chance to report an
	 * error when it finds last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmus__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}
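/*
 * Example (illustrative): parsing "cpu/config=0x1a,name=foo/" reaches
 * new_term() twice: once via parse_events_term__num() with type_val ==
 * PARSE_EVENTS__TERM_TYPE_NUM and val.num == 0x1a, and once via
 * parse_events_term__str() with type_val == PARSE_EVENTS__TERM_TYPE_STR
 * and val.str == "foo". new_term() copies *temp and fills in the value.
 */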
int parse_events_term__num(struct parse_events_term **term,
			   int type_term, char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config    = config ? : strdup(config_term_names[type_term]),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   int type_term, char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config    = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, 0);
}

int parse_events_term__term(struct parse_events_term **term,
			    int term_lhs, int term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(config_term_names[term_rhs]),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = {
		.type_val  = term->type_val,
		.type_term = term->type_term,
		.config    = NULL,
		.err_term  = term->err_term,
		.err_val   = term->err_val,
	};

	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str)
		return -ENOMEM;
	return new_term(new, &temp, str, 0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

int parse_events_copy_term_list(struct list_head *old,
				struct list_head **new)
{
	struct parse_events_term *term, *n;
	int ret;

	if (!old) {
		*new = NULL;
		return 0;
	}

	*new = malloc(sizeof(struct list_head));
	if (!*new)
		return -ENOMEM;
	INIT_LIST_HEAD(*new);

	list_for_each_entry(term, old, list) {
		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;
		list_add_tail(&n->list, *new);
	}
	return 0;
}

void parse_events_terms__purge(struct list_head *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct list_head *terms)
{
	if (!terms)
		return;
	parse_events_terms__purge(terms);
	free(terms);
}

void parse_events_evlist_error(struct parse_events_state *parse_state,
			       int idx, const char *str)
{
	if (!parse_state->error)
		return;

	parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
}
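/*
 * Illustrative example: config_terms_list() below fills the caller's
 * buffer with a comma-separated list of the static terms, something like
 * "config,config1,config2,name,period,...". The exact contents depend on
 * config_term_avail(), and internal "<...>" placeholder names are skipped.
 */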
static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = config_term_names[i];

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
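/*
 * Illustrative usage sketch ("err" and "idx" stand in for caller state):
 *
 *	char *help = parse_events_formats_error_string(NULL);
 *
 *	if (help)
 *		parse_events_error__handle(err, idx, strdup("unknown term"), help);
 *
 * parse_events_error__handle() takes ownership of the string, which is
 * later released by parse_events_error__exit(). Passing a non-NULL
 * argument prepends, e.g., a PMU's sysfs format terms to the static list.
 */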