1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/zalloc.h> 5 #include <dirent.h> 6 #include <errno.h> 7 #include <sys/ioctl.h> 8 #include <sys/param.h> 9 #include "term.h" 10 #include "evlist.h" 11 #include "evsel.h" 12 #include <subcmd/parse-options.h> 13 #include "parse-events.h" 14 #include "string2.h" 15 #include "strlist.h" 16 #include "bpf-loader.h" 17 #include "debug.h" 18 #include <api/fs/tracing_path.h> 19 #include <perf/cpumap.h> 20 #include "parse-events-bison.h" 21 #include "parse-events-flex.h" 22 #include "pmu.h" 23 #include "asm/bug.h" 24 #include "util/parse-branch-options.h" 25 #include "util/evsel_config.h" 26 #include "util/event.h" 27 #include "perf.h" 28 #include "util/parse-events-hybrid.h" 29 #include "util/pmu-hybrid.h" 30 #include "tracepoint.h" 31 #include "thread_map.h" 32 33 #define MAX_NAME_LEN 100 34 35 struct perf_pmu_event_symbol { 36 char *symbol; 37 enum perf_pmu_event_symbol_type type; 38 }; 39 40 #ifdef PARSER_DEBUG 41 extern int parse_events_debug; 42 #endif 43 int parse_events_parse(void *parse_state, void *scanner); 44 static int get_config_terms(struct list_head *head_config, 45 struct list_head *head_terms __maybe_unused); 46 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 47 const char *str, char *pmu_name, 48 struct list_head *list); 49 50 static struct perf_pmu_event_symbol *perf_pmu_events_list; 51 /* 52 * The variable indicates the number of supported pmu event symbols. 53 * 0 means not initialized and ready to init 54 * -1 means failed to init, don't try anymore 55 * >0 is the number of supported pmu event symbols 56 */ 57 static int perf_pmu_events_list_num; 58 59 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 60 [PERF_COUNT_HW_CPU_CYCLES] = { 61 .symbol = "cpu-cycles", 62 .alias = "cycles", 63 }, 64 [PERF_COUNT_HW_INSTRUCTIONS] = { 65 .symbol = "instructions", 66 .alias = "", 67 }, 68 [PERF_COUNT_HW_CACHE_REFERENCES] = { 69 .symbol = "cache-references", 70 .alias = "", 71 }, 72 [PERF_COUNT_HW_CACHE_MISSES] = { 73 .symbol = "cache-misses", 74 .alias = "", 75 }, 76 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 77 .symbol = "branch-instructions", 78 .alias = "branches", 79 }, 80 [PERF_COUNT_HW_BRANCH_MISSES] = { 81 .symbol = "branch-misses", 82 .alias = "", 83 }, 84 [PERF_COUNT_HW_BUS_CYCLES] = { 85 .symbol = "bus-cycles", 86 .alias = "", 87 }, 88 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 89 .symbol = "stalled-cycles-frontend", 90 .alias = "idle-cycles-frontend", 91 }, 92 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 93 .symbol = "stalled-cycles-backend", 94 .alias = "idle-cycles-backend", 95 }, 96 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 97 .symbol = "ref-cycles", 98 .alias = "", 99 }, 100 }; 101 102 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 103 [PERF_COUNT_SW_CPU_CLOCK] = { 104 .symbol = "cpu-clock", 105 .alias = "", 106 }, 107 [PERF_COUNT_SW_TASK_CLOCK] = { 108 .symbol = "task-clock", 109 .alias = "", 110 }, 111 [PERF_COUNT_SW_PAGE_FAULTS] = { 112 .symbol = "page-faults", 113 .alias = "faults", 114 }, 115 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 116 .symbol = "context-switches", 117 .alias = "cs", 118 }, 119 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 120 .symbol = "cpu-migrations", 121 .alias = "migrations", 122 }, 123 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 124 .symbol = "minor-faults", 125 .alias = "", 126 }, 127 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 128 .symbol = "major-faults", 129 .alias = "", 130 }, 131 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 132 .symbol = "alignment-faults", 133 .alias = "", 134 }, 135 [PERF_COUNT_SW_EMULATION_FAULTS] = { 136 .symbol = "emulation-faults", 137 .alias = "", 138 }, 139 [PERF_COUNT_SW_DUMMY] = { 140 .symbol = "dummy", 141 .alias = "", 142 }, 143 [PERF_COUNT_SW_BPF_OUTPUT] = { 144 .symbol = "bpf-output", 145 .alias = "", 146 }, 147 [PERF_COUNT_SW_CGROUP_SWITCHES] = { 148 .symbol = "cgroup-switches", 149 .alias = "", 150 }, 151 }; 152 153 bool is_event_supported(u8 type, u64 config) 154 { 155 bool ret = true; 156 int open_return; 157 struct evsel *evsel; 158 struct perf_event_attr attr = { 159 .type = type, 160 .config = config, 161 .disabled = 1, 162 }; 163 struct perf_thread_map *tmap = thread_map__new_by_tid(0); 164 165 if (tmap == NULL) 166 return false; 167 168 evsel = evsel__new(&attr); 169 if (evsel) { 170 open_return = evsel__open(evsel, NULL, tmap); 171 ret = open_return >= 0; 172 173 if (open_return == -EACCES) { 174 /* 175 * This happens if the paranoid value 176 * /proc/sys/kernel/perf_event_paranoid is set to 2 177 * Re-run with exclude_kernel set; we don't do that 178 * by default as some ARM machines do not support it. 179 * 180 */ 181 evsel->core.attr.exclude_kernel = 1; 182 ret = evsel__open(evsel, NULL, tmap) >= 0; 183 } 184 evsel__delete(evsel); 185 } 186 187 perf_thread_map__put(tmap); 188 return ret; 189 } 190 191 const char *event_type(int type) 192 { 193 switch (type) { 194 case PERF_TYPE_HARDWARE: 195 return "hardware"; 196 197 case PERF_TYPE_SOFTWARE: 198 return "software"; 199 200 case PERF_TYPE_TRACEPOINT: 201 return "tracepoint"; 202 203 case PERF_TYPE_HW_CACHE: 204 return "hardware-cache"; 205 206 default: 207 break; 208 } 209 210 return "unknown"; 211 } 212 213 static char *get_config_str(struct list_head *head_terms, int type_term) 214 { 215 struct parse_events_term *term; 216 217 if (!head_terms) 218 return NULL; 219 220 list_for_each_entry(term, head_terms, list) 221 if (term->type_term == type_term) 222 return term->val.str; 223 224 return NULL; 225 } 226 227 static char *get_config_metric_id(struct list_head *head_terms) 228 { 229 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 230 } 231 232 static char *get_config_name(struct list_head *head_terms) 233 { 234 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 235 } 236 237 static struct evsel * 238 __add_event(struct list_head *list, int *idx, 239 struct perf_event_attr *attr, 240 bool init_attr, 241 const char *name, const char *metric_id, struct perf_pmu *pmu, 242 struct list_head *config_terms, bool auto_merge_stats, 243 const char *cpu_list) 244 { 245 struct evsel *evsel; 246 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : 247 cpu_list ? perf_cpu_map__new(cpu_list) : NULL; 248 249 if (pmu) 250 perf_pmu__warn_invalid_formats(pmu); 251 252 if (pmu && attr->type == PERF_TYPE_RAW) 253 perf_pmu__warn_invalid_config(pmu, attr->config, name); 254 255 if (init_attr) 256 event_attr_init(attr); 257 258 evsel = evsel__new_idx(attr, *idx); 259 if (!evsel) { 260 perf_cpu_map__put(cpus); 261 return NULL; 262 } 263 264 (*idx)++; 265 evsel->core.cpus = cpus; 266 evsel->core.own_cpus = perf_cpu_map__get(cpus); 267 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 268 evsel->auto_merge_stats = auto_merge_stats; 269 evsel->pmu = pmu; 270 271 if (name) 272 evsel->name = strdup(name); 273 274 if (metric_id) 275 evsel->metric_id = strdup(metric_id); 276 277 if (config_terms) 278 list_splice_init(config_terms, &evsel->config_terms); 279 280 if (list) 281 list_add_tail(&evsel->core.node, list); 282 283 return evsel; 284 } 285 286 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 287 const char *name, const char *metric_id, 288 struct perf_pmu *pmu) 289 { 290 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 291 metric_id, pmu, /*config_terms=*/NULL, 292 /*auto_merge_stats=*/false, /*cpu_list=*/NULL); 293 } 294 295 static int add_event(struct list_head *list, int *idx, 296 struct perf_event_attr *attr, const char *name, 297 const char *metric_id, struct list_head *config_terms) 298 { 299 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 300 /*pmu=*/NULL, config_terms, 301 /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM; 302 } 303 304 static int add_event_tool(struct list_head *list, int *idx, 305 enum perf_tool_event tool_event) 306 { 307 struct evsel *evsel; 308 struct perf_event_attr attr = { 309 .type = PERF_TYPE_SOFTWARE, 310 .config = PERF_COUNT_SW_DUMMY, 311 }; 312 313 evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL, 314 /*metric_id=*/NULL, /*pmu=*/NULL, 315 /*config_terms=*/NULL, /*auto_merge_stats=*/false, 316 /*cpu_list=*/"0"); 317 if (!evsel) 318 return -ENOMEM; 319 evsel->tool_event = tool_event; 320 if (tool_event == PERF_TOOL_DURATION_TIME 321 || tool_event == PERF_TOOL_USER_TIME 322 || tool_event == PERF_TOOL_SYSTEM_TIME) { 323 free((char *)evsel->unit); 324 evsel->unit = strdup("ns"); 325 } 326 return 0; 327 } 328 329 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size) 330 { 331 int i, j; 332 int n, longest = -1; 333 334 for (i = 0; i < size; i++) { 335 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 336 n = strlen(names[i][j]); 337 if (n > longest && !strncasecmp(str, names[i][j], n)) 338 longest = n; 339 } 340 if (longest > 0) 341 return i; 342 } 343 344 return -1; 345 } 346 347 typedef int config_term_func_t(struct perf_event_attr *attr, 348 struct parse_events_term *term, 349 struct parse_events_error *err); 350 static int config_term_common(struct perf_event_attr *attr, 351 struct parse_events_term *term, 352 struct parse_events_error *err); 353 static int config_attr(struct perf_event_attr *attr, 354 struct list_head *head, 355 struct parse_events_error *err, 356 config_term_func_t config_term); 357 358 int parse_events_add_cache(struct list_head *list, int *idx, 359 char *type, char *op_result1, char *op_result2, 360 struct parse_events_error *err, 361 struct list_head *head_config, 362 struct parse_events_state *parse_state) 363 { 364 struct perf_event_attr attr; 365 LIST_HEAD(config_terms); 366 char name[MAX_NAME_LEN]; 367 const char *config_name, *metric_id; 368 int cache_type = -1, cache_op = -1, cache_result = -1; 369 char *op_result[2] = { op_result1, op_result2 }; 370 int i, n, ret; 371 bool hybrid; 372 373 /* 374 * No fallback - if we cannot get a clear cache type 375 * then bail out: 376 */ 377 cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX); 378 if (cache_type == -1) 379 return -EINVAL; 380 381 config_name = get_config_name(head_config); 382 n = snprintf(name, MAX_NAME_LEN, "%s", type); 383 384 for (i = 0; (i < 2) && (op_result[i]); i++) { 385 char *str = op_result[i]; 386 387 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); 388 389 if (cache_op == -1) { 390 cache_op = parse_aliases(str, evsel__hw_cache_op, 391 PERF_COUNT_HW_CACHE_OP_MAX); 392 if (cache_op >= 0) { 393 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 394 return -EINVAL; 395 continue; 396 } 397 } 398 399 if (cache_result == -1) { 400 cache_result = parse_aliases(str, evsel__hw_cache_result, 401 PERF_COUNT_HW_CACHE_RESULT_MAX); 402 if (cache_result >= 0) 403 continue; 404 } 405 } 406 407 /* 408 * Fall back to reads: 409 */ 410 if (cache_op == -1) 411 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 412 413 /* 414 * Fall back to accesses: 415 */ 416 if (cache_result == -1) 417 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 418 419 memset(&attr, 0, sizeof(attr)); 420 attr.config = cache_type | (cache_op << 8) | (cache_result << 16); 421 attr.type = PERF_TYPE_HW_CACHE; 422 423 if (head_config) { 424 if (config_attr(&attr, head_config, err, 425 config_term_common)) 426 return -EINVAL; 427 428 if (get_config_terms(head_config, &config_terms)) 429 return -ENOMEM; 430 } 431 432 metric_id = get_config_metric_id(head_config); 433 ret = parse_events__add_cache_hybrid(list, idx, &attr, 434 config_name ? : name, 435 metric_id, 436 &config_terms, 437 &hybrid, parse_state); 438 if (hybrid) 439 goto out_free_terms; 440 441 ret = add_event(list, idx, &attr, config_name ? : name, metric_id, 442 &config_terms); 443 out_free_terms: 444 free_config_terms(&config_terms); 445 return ret; 446 } 447 448 static void tracepoint_error(struct parse_events_error *e, int err, 449 const char *sys, const char *name) 450 { 451 const char *str; 452 char help[BUFSIZ]; 453 454 if (!e) 455 return; 456 457 /* 458 * We get error directly from syscall errno ( > 0), 459 * or from encoded pointer's error ( < 0). 460 */ 461 err = abs(err); 462 463 switch (err) { 464 case EACCES: 465 str = "can't access trace events"; 466 break; 467 case ENOENT: 468 str = "unknown tracepoint"; 469 break; 470 default: 471 str = "failed to add tracepoint"; 472 break; 473 } 474 475 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 476 parse_events_error__handle(e, 0, strdup(str), strdup(help)); 477 } 478 479 static int add_tracepoint(struct list_head *list, int *idx, 480 const char *sys_name, const char *evt_name, 481 struct parse_events_error *err, 482 struct list_head *head_config) 483 { 484 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++); 485 486 if (IS_ERR(evsel)) { 487 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name); 488 return PTR_ERR(evsel); 489 } 490 491 if (head_config) { 492 LIST_HEAD(config_terms); 493 494 if (get_config_terms(head_config, &config_terms)) 495 return -ENOMEM; 496 list_splice(&config_terms, &evsel->config_terms); 497 } 498 499 list_add_tail(&evsel->core.node, list); 500 return 0; 501 } 502 503 static int add_tracepoint_multi_event(struct list_head *list, int *idx, 504 const char *sys_name, const char *evt_name, 505 struct parse_events_error *err, 506 struct list_head *head_config) 507 { 508 char *evt_path; 509 struct dirent *evt_ent; 510 DIR *evt_dir; 511 int ret = 0, found = 0; 512 513 evt_path = get_events_file(sys_name); 514 if (!evt_path) { 515 tracepoint_error(err, errno, sys_name, evt_name); 516 return -1; 517 } 518 evt_dir = opendir(evt_path); 519 if (!evt_dir) { 520 put_events_file(evt_path); 521 tracepoint_error(err, errno, sys_name, evt_name); 522 return -1; 523 } 524 525 while (!ret && (evt_ent = readdir(evt_dir))) { 526 if (!strcmp(evt_ent->d_name, ".") 527 || !strcmp(evt_ent->d_name, "..") 528 || !strcmp(evt_ent->d_name, "enable") 529 || !strcmp(evt_ent->d_name, "filter")) 530 continue; 531 532 if (!strglobmatch(evt_ent->d_name, evt_name)) 533 continue; 534 535 found++; 536 537 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, 538 err, head_config); 539 } 540 541 if (!found) { 542 tracepoint_error(err, ENOENT, sys_name, evt_name); 543 ret = -1; 544 } 545 546 put_events_file(evt_path); 547 closedir(evt_dir); 548 return ret; 549 } 550 551 static int add_tracepoint_event(struct list_head *list, int *idx, 552 const char *sys_name, const char *evt_name, 553 struct parse_events_error *err, 554 struct list_head *head_config) 555 { 556 return strpbrk(evt_name, "*?") ? 557 add_tracepoint_multi_event(list, idx, sys_name, evt_name, 558 err, head_config) : 559 add_tracepoint(list, idx, sys_name, evt_name, 560 err, head_config); 561 } 562 563 static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 564 const char *sys_name, const char *evt_name, 565 struct parse_events_error *err, 566 struct list_head *head_config) 567 { 568 struct dirent *events_ent; 569 DIR *events_dir; 570 int ret = 0; 571 572 events_dir = tracing_events__opendir(); 573 if (!events_dir) { 574 tracepoint_error(err, errno, sys_name, evt_name); 575 return -1; 576 } 577 578 while (!ret && (events_ent = readdir(events_dir))) { 579 if (!strcmp(events_ent->d_name, ".") 580 || !strcmp(events_ent->d_name, "..") 581 || !strcmp(events_ent->d_name, "enable") 582 || !strcmp(events_ent->d_name, "header_event") 583 || !strcmp(events_ent->d_name, "header_page")) 584 continue; 585 586 if (!strglobmatch(events_ent->d_name, sys_name)) 587 continue; 588 589 ret = add_tracepoint_event(list, idx, events_ent->d_name, 590 evt_name, err, head_config); 591 } 592 593 closedir(events_dir); 594 return ret; 595 } 596 597 #ifdef HAVE_LIBBPF_SUPPORT 598 struct __add_bpf_event_param { 599 struct parse_events_state *parse_state; 600 struct list_head *list; 601 struct list_head *head_config; 602 }; 603 604 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj, 605 void *_param) 606 { 607 LIST_HEAD(new_evsels); 608 struct __add_bpf_event_param *param = _param; 609 struct parse_events_state *parse_state = param->parse_state; 610 struct list_head *list = param->list; 611 struct evsel *pos; 612 int err; 613 /* 614 * Check if we should add the event, i.e. if it is a TP but starts with a '!', 615 * then don't add the tracepoint, this will be used for something else, like 616 * adding to a BPF_MAP_TYPE_PROG_ARRAY. 617 * 618 * See tools/perf/examples/bpf/augmented_raw_syscalls.c 619 */ 620 if (group[0] == '!') 621 return 0; 622 623 pr_debug("add bpf event %s:%s and attach bpf program %d\n", 624 group, event, fd); 625 626 err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group, 627 event, parse_state->error, 628 param->head_config); 629 if (err) { 630 struct evsel *evsel, *tmp; 631 632 pr_debug("Failed to add BPF event %s:%s\n", 633 group, event); 634 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) { 635 list_del_init(&evsel->core.node); 636 evsel__delete(evsel); 637 } 638 return err; 639 } 640 pr_debug("adding %s:%s\n", group, event); 641 642 list_for_each_entry(pos, &new_evsels, core.node) { 643 pr_debug("adding %s:%s to %p\n", 644 group, event, pos); 645 pos->bpf_fd = fd; 646 pos->bpf_obj = obj; 647 } 648 list_splice(&new_evsels, list); 649 return 0; 650 } 651 652 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 653 struct list_head *list, 654 struct bpf_object *obj, 655 struct list_head *head_config) 656 { 657 int err; 658 char errbuf[BUFSIZ]; 659 struct __add_bpf_event_param param = {parse_state, list, head_config}; 660 static bool registered_unprobe_atexit = false; 661 662 if (IS_ERR(obj) || !obj) { 663 snprintf(errbuf, sizeof(errbuf), 664 "Internal error: load bpf obj with NULL"); 665 err = -EINVAL; 666 goto errout; 667 } 668 669 /* 670 * Register atexit handler before calling bpf__probe() so 671 * bpf__probe() don't need to unprobe probe points its already 672 * created when failure. 673 */ 674 if (!registered_unprobe_atexit) { 675 atexit(bpf__clear); 676 registered_unprobe_atexit = true; 677 } 678 679 err = bpf__probe(obj); 680 if (err) { 681 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf)); 682 goto errout; 683 } 684 685 err = bpf__load(obj); 686 if (err) { 687 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf)); 688 goto errout; 689 } 690 691 err = bpf__foreach_event(obj, add_bpf_event, ¶m); 692 if (err) { 693 snprintf(errbuf, sizeof(errbuf), 694 "Attach events in BPF object failed"); 695 goto errout; 696 } 697 698 return 0; 699 errout: 700 parse_events_error__handle(parse_state->error, 0, 701 strdup(errbuf), strdup("(add -v to see detail)")); 702 return err; 703 } 704 705 static int 706 parse_events_config_bpf(struct parse_events_state *parse_state, 707 struct bpf_object *obj, 708 struct list_head *head_config) 709 { 710 struct parse_events_term *term; 711 int error_pos; 712 713 if (!head_config || list_empty(head_config)) 714 return 0; 715 716 list_for_each_entry(term, head_config, list) { 717 int err; 718 719 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) { 720 parse_events_error__handle(parse_state->error, term->err_term, 721 strdup("Invalid config term for BPF object"), 722 NULL); 723 return -EINVAL; 724 } 725 726 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos); 727 if (err) { 728 char errbuf[BUFSIZ]; 729 int idx; 730 731 bpf__strerror_config_obj(obj, term, parse_state->evlist, 732 &error_pos, err, errbuf, 733 sizeof(errbuf)); 734 735 if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE) 736 idx = term->err_val; 737 else 738 idx = term->err_term + error_pos; 739 740 parse_events_error__handle(parse_state->error, idx, 741 strdup(errbuf), 742 strdup( 743 "Hint:\tValid config terms:\n" 744 " \tmap:[<arraymap>].value<indices>=[value]\n" 745 " \tmap:[<eventmap>].event<indices>=[event]\n" 746 "\n" 747 " \twhere <indices> is something like [0,3...5] or [all]\n" 748 " \t(add -v to see detail)")); 749 return err; 750 } 751 } 752 return 0; 753 } 754 755 /* 756 * Split config terms: 757 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ... 758 * 'call-graph=fp' is 'evt config', should be applied to each 759 * events in bpf.c. 760 * 'map:array.value[0]=1' is 'obj config', should be processed 761 * with parse_events_config_bpf. 762 * 763 * Move object config terms from the first list to obj_head_config. 764 */ 765 static void 766 split_bpf_config_terms(struct list_head *evt_head_config, 767 struct list_head *obj_head_config) 768 { 769 struct parse_events_term *term, *temp; 770 771 /* 772 * Currently, all possible user config term 773 * belong to bpf object. parse_events__is_hardcoded_term() 774 * happens to be a good flag. 775 * 776 * See parse_events_config_bpf() and 777 * config_term_tracepoint(). 778 */ 779 list_for_each_entry_safe(term, temp, evt_head_config, list) 780 if (!parse_events__is_hardcoded_term(term)) 781 list_move_tail(&term->list, obj_head_config); 782 } 783 784 int parse_events_load_bpf(struct parse_events_state *parse_state, 785 struct list_head *list, 786 char *bpf_file_name, 787 bool source, 788 struct list_head *head_config) 789 { 790 int err; 791 struct bpf_object *obj; 792 LIST_HEAD(obj_head_config); 793 794 if (head_config) 795 split_bpf_config_terms(head_config, &obj_head_config); 796 797 obj = bpf__prepare_load(bpf_file_name, source); 798 if (IS_ERR(obj)) { 799 char errbuf[BUFSIZ]; 800 801 err = PTR_ERR(obj); 802 803 if (err == -ENOTSUP) 804 snprintf(errbuf, sizeof(errbuf), 805 "BPF support is not compiled"); 806 else 807 bpf__strerror_prepare_load(bpf_file_name, 808 source, 809 -err, errbuf, 810 sizeof(errbuf)); 811 812 parse_events_error__handle(parse_state->error, 0, 813 strdup(errbuf), strdup("(add -v to see detail)")); 814 return err; 815 } 816 817 err = parse_events_load_bpf_obj(parse_state, list, obj, head_config); 818 if (err) 819 return err; 820 err = parse_events_config_bpf(parse_state, obj, &obj_head_config); 821 822 /* 823 * Caller doesn't know anything about obj_head_config, 824 * so combine them together again before returning. 825 */ 826 if (head_config) 827 list_splice_tail(&obj_head_config, head_config); 828 return err; 829 } 830 #else // HAVE_LIBBPF_SUPPORT 831 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 832 struct list_head *list __maybe_unused, 833 struct bpf_object *obj __maybe_unused, 834 struct list_head *head_config __maybe_unused) 835 { 836 parse_events_error__handle(parse_state->error, 0, 837 strdup("BPF support is not compiled"), 838 strdup("Make sure libbpf-devel is available at build time.")); 839 return -ENOTSUP; 840 } 841 842 int parse_events_load_bpf(struct parse_events_state *parse_state, 843 struct list_head *list __maybe_unused, 844 char *bpf_file_name __maybe_unused, 845 bool source __maybe_unused, 846 struct list_head *head_config __maybe_unused) 847 { 848 parse_events_error__handle(parse_state->error, 0, 849 strdup("BPF support is not compiled"), 850 strdup("Make sure libbpf-devel is available at build time.")); 851 return -ENOTSUP; 852 } 853 #endif // HAVE_LIBBPF_SUPPORT 854 855 static int 856 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 857 { 858 int i; 859 860 for (i = 0; i < 3; i++) { 861 if (!type || !type[i]) 862 break; 863 864 #define CHECK_SET_TYPE(bit) \ 865 do { \ 866 if (attr->bp_type & bit) \ 867 return -EINVAL; \ 868 else \ 869 attr->bp_type |= bit; \ 870 } while (0) 871 872 switch (type[i]) { 873 case 'r': 874 CHECK_SET_TYPE(HW_BREAKPOINT_R); 875 break; 876 case 'w': 877 CHECK_SET_TYPE(HW_BREAKPOINT_W); 878 break; 879 case 'x': 880 CHECK_SET_TYPE(HW_BREAKPOINT_X); 881 break; 882 default: 883 return -EINVAL; 884 } 885 } 886 887 #undef CHECK_SET_TYPE 888 889 if (!attr->bp_type) /* Default */ 890 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 891 892 return 0; 893 } 894 895 int parse_events_add_breakpoint(struct list_head *list, int *idx, 896 u64 addr, char *type, u64 len) 897 { 898 struct perf_event_attr attr; 899 900 memset(&attr, 0, sizeof(attr)); 901 attr.bp_addr = addr; 902 903 if (parse_breakpoint_type(type, &attr)) 904 return -EINVAL; 905 906 /* Provide some defaults if len is not specified */ 907 if (!len) { 908 if (attr.bp_type == HW_BREAKPOINT_X) 909 len = sizeof(long); 910 else 911 len = HW_BREAKPOINT_LEN_4; 912 } 913 914 attr.bp_len = len; 915 916 attr.type = PERF_TYPE_BREAKPOINT; 917 attr.sample_period = 1; 918 919 return add_event(list, idx, &attr, /*name=*/NULL, /*mertic_id=*/NULL, 920 /*config_terms=*/NULL); 921 } 922 923 static int check_type_val(struct parse_events_term *term, 924 struct parse_events_error *err, 925 int type) 926 { 927 if (type == term->type_val) 928 return 0; 929 930 if (err) { 931 parse_events_error__handle(err, term->err_val, 932 type == PARSE_EVENTS__TERM_TYPE_NUM 933 ? strdup("expected numeric value") 934 : strdup("expected string value"), 935 NULL); 936 } 937 return -EINVAL; 938 } 939 940 /* 941 * Update according to parse-events.l 942 */ 943 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 944 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 945 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 946 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 947 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 948 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 949 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 950 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 951 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 952 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 953 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 954 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 955 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 956 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 957 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 958 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 959 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 960 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 961 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 962 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 963 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 964 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 965 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 966 }; 967 968 static bool config_term_shrinked; 969 970 static bool 971 config_term_avail(int term_type, struct parse_events_error *err) 972 { 973 char *err_str; 974 975 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 976 parse_events_error__handle(err, -1, 977 strdup("Invalid term_type"), NULL); 978 return false; 979 } 980 if (!config_term_shrinked) 981 return true; 982 983 switch (term_type) { 984 case PARSE_EVENTS__TERM_TYPE_CONFIG: 985 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 986 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 987 case PARSE_EVENTS__TERM_TYPE_NAME: 988 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 989 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 990 case PARSE_EVENTS__TERM_TYPE_PERCORE: 991 return true; 992 default: 993 if (!err) 994 return false; 995 996 /* term_type is validated so indexing is safe */ 997 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 998 config_term_names[term_type]) >= 0) 999 parse_events_error__handle(err, -1, err_str, NULL); 1000 return false; 1001 } 1002 } 1003 1004 void parse_events__shrink_config_terms(void) 1005 { 1006 config_term_shrinked = true; 1007 } 1008 1009 static int config_term_common(struct perf_event_attr *attr, 1010 struct parse_events_term *term, 1011 struct parse_events_error *err) 1012 { 1013 #define CHECK_TYPE_VAL(type) \ 1014 do { \ 1015 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 1016 return -EINVAL; \ 1017 } while (0) 1018 1019 switch (term->type_term) { 1020 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1021 CHECK_TYPE_VAL(NUM); 1022 attr->config = term->val.num; 1023 break; 1024 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1025 CHECK_TYPE_VAL(NUM); 1026 attr->config1 = term->val.num; 1027 break; 1028 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1029 CHECK_TYPE_VAL(NUM); 1030 attr->config2 = term->val.num; 1031 break; 1032 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1033 CHECK_TYPE_VAL(NUM); 1034 break; 1035 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1036 CHECK_TYPE_VAL(NUM); 1037 break; 1038 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1039 CHECK_TYPE_VAL(STR); 1040 if (strcmp(term->val.str, "no") && 1041 parse_branch_str(term->val.str, 1042 &attr->branch_sample_type)) { 1043 parse_events_error__handle(err, term->err_val, 1044 strdup("invalid branch sample type"), 1045 NULL); 1046 return -EINVAL; 1047 } 1048 break; 1049 case PARSE_EVENTS__TERM_TYPE_TIME: 1050 CHECK_TYPE_VAL(NUM); 1051 if (term->val.num > 1) { 1052 parse_events_error__handle(err, term->err_val, 1053 strdup("expected 0 or 1"), 1054 NULL); 1055 return -EINVAL; 1056 } 1057 break; 1058 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1059 CHECK_TYPE_VAL(STR); 1060 break; 1061 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1062 CHECK_TYPE_VAL(NUM); 1063 break; 1064 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1065 CHECK_TYPE_VAL(NUM); 1066 break; 1067 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1068 CHECK_TYPE_VAL(NUM); 1069 break; 1070 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1071 CHECK_TYPE_VAL(NUM); 1072 break; 1073 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1074 CHECK_TYPE_VAL(NUM); 1075 break; 1076 case PARSE_EVENTS__TERM_TYPE_NAME: 1077 CHECK_TYPE_VAL(STR); 1078 break; 1079 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1080 CHECK_TYPE_VAL(STR); 1081 break; 1082 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1083 CHECK_TYPE_VAL(NUM); 1084 break; 1085 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1086 CHECK_TYPE_VAL(NUM); 1087 break; 1088 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1089 CHECK_TYPE_VAL(NUM); 1090 if ((unsigned int)term->val.num > 1) { 1091 parse_events_error__handle(err, term->err_val, 1092 strdup("expected 0 or 1"), 1093 NULL); 1094 return -EINVAL; 1095 } 1096 break; 1097 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1098 CHECK_TYPE_VAL(NUM); 1099 break; 1100 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1101 CHECK_TYPE_VAL(NUM); 1102 if (term->val.num > UINT_MAX) { 1103 parse_events_error__handle(err, term->err_val, 1104 strdup("too big"), 1105 NULL); 1106 return -EINVAL; 1107 } 1108 break; 1109 default: 1110 parse_events_error__handle(err, term->err_term, 1111 strdup("unknown term"), 1112 parse_events_formats_error_string(NULL)); 1113 return -EINVAL; 1114 } 1115 1116 /* 1117 * Check term availability after basic checking so 1118 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 1119 * 1120 * If check availability at the entry of this function, 1121 * user will see "'<sysfs term>' is not usable in 'perf stat'" 1122 * if an invalid config term is provided for legacy events 1123 * (for example, instructions/badterm/...), which is confusing. 1124 */ 1125 if (!config_term_avail(term->type_term, err)) 1126 return -EINVAL; 1127 return 0; 1128 #undef CHECK_TYPE_VAL 1129 } 1130 1131 static int config_term_pmu(struct perf_event_attr *attr, 1132 struct parse_events_term *term, 1133 struct parse_events_error *err) 1134 { 1135 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1136 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) 1137 /* 1138 * Always succeed for sysfs terms, as we dont know 1139 * at this point what type they need to have. 1140 */ 1141 return 0; 1142 else 1143 return config_term_common(attr, term, err); 1144 } 1145 1146 static int config_term_tracepoint(struct perf_event_attr *attr, 1147 struct parse_events_term *term, 1148 struct parse_events_error *err) 1149 { 1150 switch (term->type_term) { 1151 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1152 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1153 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1154 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1155 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1156 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1157 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1158 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1159 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1160 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1161 return config_term_common(attr, term, err); 1162 default: 1163 if (err) { 1164 parse_events_error__handle(err, term->err_term, 1165 strdup("unknown term"), 1166 strdup("valid terms: call-graph,stack-size\n")); 1167 } 1168 return -EINVAL; 1169 } 1170 1171 return 0; 1172 } 1173 1174 static int config_attr(struct perf_event_attr *attr, 1175 struct list_head *head, 1176 struct parse_events_error *err, 1177 config_term_func_t config_term) 1178 { 1179 struct parse_events_term *term; 1180 1181 list_for_each_entry(term, head, list) 1182 if (config_term(attr, term, err)) 1183 return -EINVAL; 1184 1185 return 0; 1186 } 1187 1188 static int get_config_terms(struct list_head *head_config, 1189 struct list_head *head_terms __maybe_unused) 1190 { 1191 #define ADD_CONFIG_TERM(__type, __weak) \ 1192 struct evsel_config_term *__t; \ 1193 \ 1194 __t = zalloc(sizeof(*__t)); \ 1195 if (!__t) \ 1196 return -ENOMEM; \ 1197 \ 1198 INIT_LIST_HEAD(&__t->list); \ 1199 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1200 __t->weak = __weak; \ 1201 list_add_tail(&__t->list, head_terms) 1202 1203 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1204 do { \ 1205 ADD_CONFIG_TERM(__type, __weak); \ 1206 __t->val.__name = __val; \ 1207 } while (0) 1208 1209 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1210 do { \ 1211 ADD_CONFIG_TERM(__type, __weak); \ 1212 __t->val.str = strdup(__val); \ 1213 if (!__t->val.str) { \ 1214 zfree(&__t); \ 1215 return -ENOMEM; \ 1216 } \ 1217 __t->free_str = true; \ 1218 } while (0) 1219 1220 struct parse_events_term *term; 1221 1222 list_for_each_entry(term, head_config, list) { 1223 switch (term->type_term) { 1224 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1225 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1226 break; 1227 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1228 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); 1229 break; 1230 case PARSE_EVENTS__TERM_TYPE_TIME: 1231 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1232 break; 1233 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1234 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1235 break; 1236 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1237 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1238 break; 1239 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1240 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1241 term->val.num, term->weak); 1242 break; 1243 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1244 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1245 term->val.num ? 1 : 0, term->weak); 1246 break; 1247 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1248 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1249 term->val.num ? 0 : 1, term->weak); 1250 break; 1251 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1252 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1253 term->val.num, term->weak); 1254 break; 1255 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1256 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1257 term->val.num, term->weak); 1258 break; 1259 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1260 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1261 term->val.num ? 1 : 0, term->weak); 1262 break; 1263 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1264 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1265 term->val.num ? 0 : 1, term->weak); 1266 break; 1267 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1268 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1269 break; 1270 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1271 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1272 term->val.num ? true : false, term->weak); 1273 break; 1274 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1275 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1276 term->val.num ? 1 : 0, term->weak); 1277 break; 1278 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1279 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1280 term->val.num, term->weak); 1281 break; 1282 default: 1283 break; 1284 } 1285 } 1286 return 0; 1287 } 1288 1289 /* 1290 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1291 * each bit of attr->config that the user has changed. 1292 */ 1293 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config, 1294 struct list_head *head_terms) 1295 { 1296 struct parse_events_term *term; 1297 u64 bits = 0; 1298 int type; 1299 1300 list_for_each_entry(term, head_config, list) { 1301 switch (term->type_term) { 1302 case PARSE_EVENTS__TERM_TYPE_USER: 1303 type = perf_pmu__format_type(&pmu->format, term->config); 1304 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1305 continue; 1306 bits |= perf_pmu__format_bits(&pmu->format, term->config); 1307 break; 1308 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1309 bits = ~(u64)0; 1310 break; 1311 default: 1312 break; 1313 } 1314 } 1315 1316 if (bits) 1317 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1318 1319 #undef ADD_CONFIG_TERM 1320 return 0; 1321 } 1322 1323 int parse_events_add_tracepoint(struct list_head *list, int *idx, 1324 const char *sys, const char *event, 1325 struct parse_events_error *err, 1326 struct list_head *head_config) 1327 { 1328 if (head_config) { 1329 struct perf_event_attr attr; 1330 1331 if (config_attr(&attr, head_config, err, 1332 config_term_tracepoint)) 1333 return -EINVAL; 1334 } 1335 1336 if (strpbrk(sys, "*?")) 1337 return add_tracepoint_multi_sys(list, idx, sys, event, 1338 err, head_config); 1339 else 1340 return add_tracepoint_event(list, idx, sys, event, 1341 err, head_config); 1342 } 1343 1344 int parse_events_add_numeric(struct parse_events_state *parse_state, 1345 struct list_head *list, 1346 u32 type, u64 config, 1347 struct list_head *head_config) 1348 { 1349 struct perf_event_attr attr; 1350 LIST_HEAD(config_terms); 1351 const char *name, *metric_id; 1352 bool hybrid; 1353 int ret; 1354 1355 memset(&attr, 0, sizeof(attr)); 1356 attr.type = type; 1357 attr.config = config; 1358 1359 if (head_config) { 1360 if (config_attr(&attr, head_config, parse_state->error, 1361 config_term_common)) 1362 return -EINVAL; 1363 1364 if (get_config_terms(head_config, &config_terms)) 1365 return -ENOMEM; 1366 } 1367 1368 name = get_config_name(head_config); 1369 metric_id = get_config_metric_id(head_config); 1370 ret = parse_events__add_numeric_hybrid(parse_state, list, &attr, 1371 name, metric_id, 1372 &config_terms, &hybrid); 1373 if (hybrid) 1374 goto out_free_terms; 1375 1376 ret = add_event(list, &parse_state->idx, &attr, name, metric_id, 1377 &config_terms); 1378 out_free_terms: 1379 free_config_terms(&config_terms); 1380 return ret; 1381 } 1382 1383 int parse_events_add_tool(struct parse_events_state *parse_state, 1384 struct list_head *list, 1385 int tool_event) 1386 { 1387 return add_event_tool(list, &parse_state->idx, tool_event); 1388 } 1389 1390 static bool config_term_percore(struct list_head *config_terms) 1391 { 1392 struct evsel_config_term *term; 1393 1394 list_for_each_entry(term, config_terms, list) { 1395 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1396 return term->val.percore; 1397 } 1398 1399 return false; 1400 } 1401 1402 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state, 1403 struct list_head *list, char *name, 1404 struct list_head *head_config) 1405 { 1406 struct parse_events_term *term; 1407 int ret = -1; 1408 1409 if (parse_state->fake_pmu || !head_config || list_empty(head_config) || 1410 !perf_pmu__is_hybrid(name)) { 1411 return -1; 1412 } 1413 1414 /* 1415 * More than one term in list. 1416 */ 1417 if (head_config->next && head_config->next->next != head_config) 1418 return -1; 1419 1420 term = list_first_entry(head_config, struct parse_events_term, list); 1421 if (term && term->config && strcmp(term->config, "event")) { 1422 ret = parse_events__with_hybrid_pmu(parse_state, term->config, 1423 name, list); 1424 } 1425 1426 return ret; 1427 } 1428 1429 int parse_events_add_pmu(struct parse_events_state *parse_state, 1430 struct list_head *list, char *name, 1431 struct list_head *head_config, 1432 bool auto_merge_stats, 1433 bool use_alias) 1434 { 1435 struct perf_event_attr attr; 1436 struct perf_pmu_info info; 1437 struct perf_pmu *pmu; 1438 struct evsel *evsel; 1439 struct parse_events_error *err = parse_state->error; 1440 bool use_uncore_alias; 1441 LIST_HEAD(config_terms); 1442 1443 pmu = parse_state->fake_pmu ?: perf_pmu__find(name); 1444 1445 if (verbose > 1 && !(pmu && pmu->selectable)) { 1446 fprintf(stderr, "Attempting to add event pmu '%s' with '", 1447 name); 1448 if (head_config) { 1449 struct parse_events_term *term; 1450 1451 list_for_each_entry(term, head_config, list) { 1452 fprintf(stderr, "%s,", term->config); 1453 } 1454 } 1455 fprintf(stderr, "' that may result in non-fatal errors\n"); 1456 } 1457 1458 if (!pmu) { 1459 char *err_str; 1460 1461 if (asprintf(&err_str, 1462 "Cannot find PMU `%s'. Missing kernel support?", 1463 name) >= 0) 1464 parse_events_error__handle(err, 0, err_str, NULL); 1465 return -EINVAL; 1466 } 1467 1468 if (pmu->default_config) { 1469 memcpy(&attr, pmu->default_config, 1470 sizeof(struct perf_event_attr)); 1471 } else { 1472 memset(&attr, 0, sizeof(attr)); 1473 } 1474 1475 use_uncore_alias = (pmu->is_uncore && use_alias); 1476 1477 if (!head_config) { 1478 attr.type = pmu->type; 1479 evsel = __add_event(list, &parse_state->idx, &attr, 1480 /*init_attr=*/true, /*name=*/NULL, 1481 /*metric_id=*/NULL, pmu, 1482 /*config_terms=*/NULL, auto_merge_stats, 1483 /*cpu_list=*/NULL); 1484 if (evsel) { 1485 evsel->pmu_name = name ? strdup(name) : NULL; 1486 evsel->use_uncore_alias = use_uncore_alias; 1487 return 0; 1488 } else { 1489 return -ENOMEM; 1490 } 1491 } 1492 1493 if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info)) 1494 return -EINVAL; 1495 1496 if (verbose > 1) { 1497 fprintf(stderr, "After aliases, add event pmu '%s' with '", 1498 name); 1499 if (head_config) { 1500 struct parse_events_term *term; 1501 1502 list_for_each_entry(term, head_config, list) { 1503 fprintf(stderr, "%s,", term->config); 1504 } 1505 } 1506 fprintf(stderr, "' that may result in non-fatal errors\n"); 1507 } 1508 1509 /* 1510 * Configure hardcoded terms first, no need to check 1511 * return value when called with fail == 0 ;) 1512 */ 1513 if (config_attr(&attr, head_config, parse_state->error, config_term_pmu)) 1514 return -EINVAL; 1515 1516 if (get_config_terms(head_config, &config_terms)) 1517 return -ENOMEM; 1518 1519 /* 1520 * When using default config, record which bits of attr->config were 1521 * changed by the user. 1522 */ 1523 if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms)) 1524 return -ENOMEM; 1525 1526 if (!parse_events__inside_hybrid_pmu(parse_state, list, name, 1527 head_config)) { 1528 return 0; 1529 } 1530 1531 if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { 1532 free_config_terms(&config_terms); 1533 return -EINVAL; 1534 } 1535 1536 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1537 get_config_name(head_config), 1538 get_config_metric_id(head_config), pmu, 1539 &config_terms, auto_merge_stats, /*cpu_list=*/NULL); 1540 if (!evsel) 1541 return -ENOMEM; 1542 1543 if (evsel->name) 1544 evsel->use_config_name = true; 1545 1546 evsel->pmu_name = name ? strdup(name) : NULL; 1547 evsel->use_uncore_alias = use_uncore_alias; 1548 evsel->percore = config_term_percore(&evsel->config_terms); 1549 1550 if (parse_state->fake_pmu) 1551 return 0; 1552 1553 free((char *)evsel->unit); 1554 evsel->unit = strdup(info.unit); 1555 evsel->scale = info.scale; 1556 evsel->per_pkg = info.per_pkg; 1557 evsel->snapshot = info.snapshot; 1558 evsel->metric_expr = info.metric_expr; 1559 evsel->metric_name = info.metric_name; 1560 return 0; 1561 } 1562 1563 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1564 char *str, struct list_head *head, 1565 struct list_head **listp) 1566 { 1567 struct parse_events_term *term; 1568 struct list_head *list = NULL; 1569 struct list_head *orig_head = NULL; 1570 struct perf_pmu *pmu = NULL; 1571 int ok = 0; 1572 char *config; 1573 1574 *listp = NULL; 1575 1576 if (!head) { 1577 head = malloc(sizeof(struct list_head)); 1578 if (!head) 1579 goto out_err; 1580 1581 INIT_LIST_HEAD(head); 1582 } 1583 config = strdup(str); 1584 if (!config) 1585 goto out_err; 1586 1587 if (parse_events_term__num(&term, 1588 PARSE_EVENTS__TERM_TYPE_USER, 1589 config, 1, false, &config, 1590 NULL) < 0) { 1591 free(config); 1592 goto out_err; 1593 } 1594 list_add_tail(&term->list, head); 1595 1596 /* Add it for all PMUs that support the alias */ 1597 list = malloc(sizeof(struct list_head)); 1598 if (!list) 1599 goto out_err; 1600 1601 INIT_LIST_HEAD(list); 1602 1603 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1604 struct perf_pmu_alias *alias; 1605 1606 list_for_each_entry(alias, &pmu->aliases, list) { 1607 if (!strcasecmp(alias->name, str)) { 1608 parse_events_copy_term_list(head, &orig_head); 1609 if (!parse_events_add_pmu(parse_state, list, 1610 pmu->name, orig_head, 1611 true, true)) { 1612 pr_debug("%s -> %s/%s/\n", str, 1613 pmu->name, alias->str); 1614 ok++; 1615 } 1616 parse_events_terms__delete(orig_head); 1617 } 1618 } 1619 } 1620 1621 if (parse_state->fake_pmu) { 1622 if (!parse_events_add_pmu(parse_state, list, str, head, 1623 true, true)) { 1624 pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str); 1625 ok++; 1626 } 1627 } 1628 1629 out_err: 1630 if (ok) 1631 *listp = list; 1632 else 1633 free(list); 1634 1635 parse_events_terms__delete(head); 1636 return ok ? 0 : -1; 1637 } 1638 1639 int parse_events__modifier_group(struct list_head *list, 1640 char *event_mod) 1641 { 1642 return parse_events__modifier_event(list, event_mod, true); 1643 } 1644 1645 /* 1646 * Check if the two uncore PMUs are from the same uncore block 1647 * The format of the uncore PMU name is uncore_#blockname_#pmuidx 1648 */ 1649 static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b) 1650 { 1651 char *end_a, *end_b; 1652 1653 end_a = strrchr(pmu_name_a, '_'); 1654 end_b = strrchr(pmu_name_b, '_'); 1655 1656 if (!end_a || !end_b) 1657 return false; 1658 1659 if ((end_a - pmu_name_a) != (end_b - pmu_name_b)) 1660 return false; 1661 1662 return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0); 1663 } 1664 1665 static int 1666 parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, 1667 struct parse_events_state *parse_state) 1668 { 1669 struct evsel *evsel, *leader; 1670 uintptr_t *leaders; 1671 bool is_leader = true; 1672 int i, nr_pmu = 0, total_members, ret = 0; 1673 1674 leader = list_first_entry(list, struct evsel, core.node); 1675 evsel = list_last_entry(list, struct evsel, core.node); 1676 total_members = evsel->core.idx - leader->core.idx + 1; 1677 1678 leaders = calloc(total_members, sizeof(uintptr_t)); 1679 if (WARN_ON(!leaders)) 1680 return 0; 1681 1682 /* 1683 * Going through the whole group and doing sanity check. 1684 * All members must use alias, and be from the same uncore block. 1685 * Also, storing the leader events in an array. 1686 */ 1687 __evlist__for_each_entry(list, evsel) { 1688 1689 /* Only split the uncore group which members use alias */ 1690 if (!evsel->use_uncore_alias) 1691 goto out; 1692 1693 /* The events must be from the same uncore block */ 1694 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name)) 1695 goto out; 1696 1697 if (!is_leader) 1698 continue; 1699 /* 1700 * If the event's PMU name starts to repeat, it must be a new 1701 * event. That can be used to distinguish the leader from 1702 * other members, even they have the same event name. 1703 */ 1704 if ((leader != evsel) && 1705 !strcmp(leader->pmu_name, evsel->pmu_name)) { 1706 is_leader = false; 1707 continue; 1708 } 1709 1710 /* Store the leader event for each PMU */ 1711 leaders[nr_pmu++] = (uintptr_t) evsel; 1712 } 1713 1714 /* only one event alias */ 1715 if (nr_pmu == total_members) { 1716 parse_state->nr_groups--; 1717 goto handled; 1718 } 1719 1720 /* 1721 * An uncore event alias is a joint name which means the same event 1722 * runs on all PMUs of a block. 1723 * Perf doesn't support mixed events from different PMUs in the same 1724 * group. The big group has to be split into multiple small groups 1725 * which only include the events from the same PMU. 1726 * 1727 * Here the uncore event aliases must be from the same uncore block. 1728 * The number of PMUs must be same for each alias. The number of new 1729 * small groups equals to the number of PMUs. 1730 * Setting the leader event for corresponding members in each group. 1731 */ 1732 i = 0; 1733 __evlist__for_each_entry(list, evsel) { 1734 if (i >= nr_pmu) 1735 i = 0; 1736 evsel__set_leader(evsel, (struct evsel *) leaders[i++]); 1737 } 1738 1739 /* The number of members and group name are same for each group */ 1740 for (i = 0; i < nr_pmu; i++) { 1741 evsel = (struct evsel *) leaders[i]; 1742 evsel->core.nr_members = total_members / nr_pmu; 1743 evsel->group_name = name ? strdup(name) : NULL; 1744 } 1745 1746 /* Take the new small groups into account */ 1747 parse_state->nr_groups += nr_pmu - 1; 1748 1749 handled: 1750 ret = 1; 1751 out: 1752 free(leaders); 1753 return ret; 1754 } 1755 1756 __weak struct evsel *arch_evlist__leader(struct list_head *list) 1757 { 1758 return list_first_entry(list, struct evsel, core.node); 1759 } 1760 1761 void parse_events__set_leader(char *name, struct list_head *list, 1762 struct parse_events_state *parse_state) 1763 { 1764 struct evsel *leader; 1765 1766 if (list_empty(list)) { 1767 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1768 return; 1769 } 1770 1771 if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) 1772 return; 1773 1774 leader = arch_evlist__leader(list); 1775 __perf_evlist__set_leader(list, &leader->core); 1776 leader->group_name = name ? strdup(name) : NULL; 1777 list_move(&leader->core.node, list); 1778 } 1779 1780 /* list_event is assumed to point to malloc'ed memory */ 1781 void parse_events_update_lists(struct list_head *list_event, 1782 struct list_head *list_all) 1783 { 1784 /* 1785 * Called for single event definition. Update the 1786 * 'all event' list, and reinit the 'single event' 1787 * list, for next event definition. 1788 */ 1789 list_splice_tail(list_event, list_all); 1790 free(list_event); 1791 } 1792 1793 struct event_modifier { 1794 int eu; 1795 int ek; 1796 int eh; 1797 int eH; 1798 int eG; 1799 int eI; 1800 int precise; 1801 int precise_max; 1802 int exclude_GH; 1803 int sample_read; 1804 int pinned; 1805 int weak; 1806 int exclusive; 1807 int bpf_counter; 1808 }; 1809 1810 static int get_event_modifier(struct event_modifier *mod, char *str, 1811 struct evsel *evsel) 1812 { 1813 int eu = evsel ? evsel->core.attr.exclude_user : 0; 1814 int ek = evsel ? evsel->core.attr.exclude_kernel : 0; 1815 int eh = evsel ? evsel->core.attr.exclude_hv : 0; 1816 int eH = evsel ? evsel->core.attr.exclude_host : 0; 1817 int eG = evsel ? evsel->core.attr.exclude_guest : 0; 1818 int eI = evsel ? evsel->core.attr.exclude_idle : 0; 1819 int precise = evsel ? evsel->core.attr.precise_ip : 0; 1820 int precise_max = 0; 1821 int sample_read = 0; 1822 int pinned = evsel ? evsel->core.attr.pinned : 0; 1823 int exclusive = evsel ? evsel->core.attr.exclusive : 0; 1824 1825 int exclude = eu | ek | eh; 1826 int exclude_GH = evsel ? evsel->exclude_GH : 0; 1827 int weak = 0; 1828 int bpf_counter = 0; 1829 1830 memset(mod, 0, sizeof(*mod)); 1831 1832 while (*str) { 1833 if (*str == 'u') { 1834 if (!exclude) 1835 exclude = eu = ek = eh = 1; 1836 if (!exclude_GH && !perf_guest) 1837 eG = 1; 1838 eu = 0; 1839 } else if (*str == 'k') { 1840 if (!exclude) 1841 exclude = eu = ek = eh = 1; 1842 ek = 0; 1843 } else if (*str == 'h') { 1844 if (!exclude) 1845 exclude = eu = ek = eh = 1; 1846 eh = 0; 1847 } else if (*str == 'G') { 1848 if (!exclude_GH) 1849 exclude_GH = eG = eH = 1; 1850 eG = 0; 1851 } else if (*str == 'H') { 1852 if (!exclude_GH) 1853 exclude_GH = eG = eH = 1; 1854 eH = 0; 1855 } else if (*str == 'I') { 1856 eI = 1; 1857 } else if (*str == 'p') { 1858 precise++; 1859 /* use of precise requires exclude_guest */ 1860 if (!exclude_GH) 1861 eG = 1; 1862 } else if (*str == 'P') { 1863 precise_max = 1; 1864 } else if (*str == 'S') { 1865 sample_read = 1; 1866 } else if (*str == 'D') { 1867 pinned = 1; 1868 } else if (*str == 'e') { 1869 exclusive = 1; 1870 } else if (*str == 'W') { 1871 weak = 1; 1872 } else if (*str == 'b') { 1873 bpf_counter = 1; 1874 } else 1875 break; 1876 1877 ++str; 1878 } 1879 1880 /* 1881 * precise ip: 1882 * 1883 * 0 - SAMPLE_IP can have arbitrary skid 1884 * 1 - SAMPLE_IP must have constant skid 1885 * 2 - SAMPLE_IP requested to have 0 skid 1886 * 3 - SAMPLE_IP must have 0 skid 1887 * 1888 * See also PERF_RECORD_MISC_EXACT_IP 1889 */ 1890 if (precise > 3) 1891 return -EINVAL; 1892 1893 mod->eu = eu; 1894 mod->ek = ek; 1895 mod->eh = eh; 1896 mod->eH = eH; 1897 mod->eG = eG; 1898 mod->eI = eI; 1899 mod->precise = precise; 1900 mod->precise_max = precise_max; 1901 mod->exclude_GH = exclude_GH; 1902 mod->sample_read = sample_read; 1903 mod->pinned = pinned; 1904 mod->weak = weak; 1905 mod->bpf_counter = bpf_counter; 1906 mod->exclusive = exclusive; 1907 1908 return 0; 1909 } 1910 1911 /* 1912 * Basic modifier sanity check to validate it contains only one 1913 * instance of any modifier (apart from 'p') present. 1914 */ 1915 static int check_modifier(char *str) 1916 { 1917 char *p = str; 1918 1919 /* The sizeof includes 0 byte as well. */ 1920 if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1)) 1921 return -1; 1922 1923 while (*p) { 1924 if (*p != 'p' && strchr(p + 1, *p)) 1925 return -1; 1926 p++; 1927 } 1928 1929 return 0; 1930 } 1931 1932 int parse_events__modifier_event(struct list_head *list, char *str, bool add) 1933 { 1934 struct evsel *evsel; 1935 struct event_modifier mod; 1936 1937 if (str == NULL) 1938 return 0; 1939 1940 if (check_modifier(str)) 1941 return -EINVAL; 1942 1943 if (!add && get_event_modifier(&mod, str, NULL)) 1944 return -EINVAL; 1945 1946 __evlist__for_each_entry(list, evsel) { 1947 if (add && get_event_modifier(&mod, str, evsel)) 1948 return -EINVAL; 1949 1950 evsel->core.attr.exclude_user = mod.eu; 1951 evsel->core.attr.exclude_kernel = mod.ek; 1952 evsel->core.attr.exclude_hv = mod.eh; 1953 evsel->core.attr.precise_ip = mod.precise; 1954 evsel->core.attr.exclude_host = mod.eH; 1955 evsel->core.attr.exclude_guest = mod.eG; 1956 evsel->core.attr.exclude_idle = mod.eI; 1957 evsel->exclude_GH = mod.exclude_GH; 1958 evsel->sample_read = mod.sample_read; 1959 evsel->precise_max = mod.precise_max; 1960 evsel->weak_group = mod.weak; 1961 evsel->bpf_counter = mod.bpf_counter; 1962 1963 if (evsel__is_group_leader(evsel)) { 1964 evsel->core.attr.pinned = mod.pinned; 1965 evsel->core.attr.exclusive = mod.exclusive; 1966 } 1967 } 1968 1969 return 0; 1970 } 1971 1972 int parse_events_name(struct list_head *list, const char *name) 1973 { 1974 struct evsel *evsel; 1975 1976 __evlist__for_each_entry(list, evsel) { 1977 if (!evsel->name) 1978 evsel->name = strdup(name); 1979 } 1980 1981 return 0; 1982 } 1983 1984 static int 1985 comp_pmu(const void *p1, const void *p2) 1986 { 1987 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; 1988 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; 1989 1990 return strcasecmp(pmu1->symbol, pmu2->symbol); 1991 } 1992 1993 static void perf_pmu__parse_cleanup(void) 1994 { 1995 if (perf_pmu_events_list_num > 0) { 1996 struct perf_pmu_event_symbol *p; 1997 int i; 1998 1999 for (i = 0; i < perf_pmu_events_list_num; i++) { 2000 p = perf_pmu_events_list + i; 2001 zfree(&p->symbol); 2002 } 2003 zfree(&perf_pmu_events_list); 2004 perf_pmu_events_list_num = 0; 2005 } 2006 } 2007 2008 #define SET_SYMBOL(str, stype) \ 2009 do { \ 2010 p->symbol = str; \ 2011 if (!p->symbol) \ 2012 goto err; \ 2013 p->type = stype; \ 2014 } while (0) 2015 2016 /* 2017 * Read the pmu events list from sysfs 2018 * Save it into perf_pmu_events_list 2019 */ 2020 static void perf_pmu__parse_init(void) 2021 { 2022 2023 struct perf_pmu *pmu = NULL; 2024 struct perf_pmu_alias *alias; 2025 int len = 0; 2026 2027 pmu = NULL; 2028 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 2029 list_for_each_entry(alias, &pmu->aliases, list) { 2030 char *tmp = strchr(alias->name, '-'); 2031 2032 if (tmp) { 2033 char *tmp2 = NULL; 2034 2035 tmp2 = strchr(tmp + 1, '-'); 2036 len++; 2037 if (tmp2) 2038 len++; 2039 } 2040 2041 len++; 2042 } 2043 } 2044 2045 if (len == 0) { 2046 perf_pmu_events_list_num = -1; 2047 return; 2048 } 2049 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); 2050 if (!perf_pmu_events_list) 2051 return; 2052 perf_pmu_events_list_num = len; 2053 2054 len = 0; 2055 pmu = NULL; 2056 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 2057 list_for_each_entry(alias, &pmu->aliases, list) { 2058 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; 2059 char *tmp = strchr(alias->name, '-'); 2060 char *tmp2 = NULL; 2061 2062 if (tmp) 2063 tmp2 = strchr(tmp + 1, '-'); 2064 if (tmp2) { 2065 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2066 PMU_EVENT_SYMBOL_PREFIX); 2067 p++; 2068 tmp++; 2069 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX); 2070 p++; 2071 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2); 2072 len += 3; 2073 } else if (tmp) { 2074 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2075 PMU_EVENT_SYMBOL_PREFIX); 2076 p++; 2077 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); 2078 len += 2; 2079 } else { 2080 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); 2081 len++; 2082 } 2083 } 2084 } 2085 qsort(perf_pmu_events_list, len, 2086 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2087 2088 return; 2089 err: 2090 perf_pmu__parse_cleanup(); 2091 } 2092 2093 /* 2094 * This function injects special term in 2095 * perf_pmu_events_list so the test code 2096 * can check on this functionality. 2097 */ 2098 int perf_pmu__test_parse_init(void) 2099 { 2100 struct perf_pmu_event_symbol *list, *tmp, symbols[] = { 2101 {(char *)"read", PMU_EVENT_SYMBOL}, 2102 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX}, 2103 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX}, 2104 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX}, 2105 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2}, 2106 }; 2107 unsigned long i, j; 2108 2109 tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols)); 2110 if (!list) 2111 return -ENOMEM; 2112 2113 for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) { 2114 tmp->type = symbols[i].type; 2115 tmp->symbol = strdup(symbols[i].symbol); 2116 if (!tmp->symbol) 2117 goto err_free; 2118 } 2119 2120 perf_pmu_events_list = list; 2121 perf_pmu_events_list_num = ARRAY_SIZE(symbols); 2122 2123 qsort(perf_pmu_events_list, ARRAY_SIZE(symbols), 2124 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2125 return 0; 2126 2127 err_free: 2128 for (j = 0, tmp = list; j < i; j++, tmp++) 2129 free(tmp->symbol); 2130 free(list); 2131 return -ENOMEM; 2132 } 2133 2134 enum perf_pmu_event_symbol_type 2135 perf_pmu__parse_check(const char *name) 2136 { 2137 struct perf_pmu_event_symbol p, *r; 2138 2139 /* scan kernel pmu events from sysfs if needed */ 2140 if (perf_pmu_events_list_num == 0) 2141 perf_pmu__parse_init(); 2142 /* 2143 * name "cpu" could be prefix of cpu-cycles or cpu// events. 2144 * cpu-cycles has been handled by hardcode. 2145 * So it must be cpu// events, not kernel pmu event. 2146 */ 2147 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) 2148 return PMU_EVENT_SYMBOL_ERR; 2149 2150 p.symbol = strdup(name); 2151 r = bsearch(&p, perf_pmu_events_list, 2152 (size_t) perf_pmu_events_list_num, 2153 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2154 zfree(&p.symbol); 2155 return r ? r->type : PMU_EVENT_SYMBOL_ERR; 2156 } 2157 2158 static int parse_events__scanner(const char *str, 2159 struct parse_events_state *parse_state) 2160 { 2161 YY_BUFFER_STATE buffer; 2162 void *scanner; 2163 int ret; 2164 2165 ret = parse_events_lex_init_extra(parse_state, &scanner); 2166 if (ret) 2167 return ret; 2168 2169 buffer = parse_events__scan_string(str, scanner); 2170 2171 #ifdef PARSER_DEBUG 2172 parse_events_debug = 1; 2173 parse_events_set_debug(1, scanner); 2174 #endif 2175 ret = parse_events_parse(parse_state, scanner); 2176 2177 parse_events__flush_buffer(buffer, scanner); 2178 parse_events__delete_buffer(buffer, scanner); 2179 parse_events_lex_destroy(scanner); 2180 return ret; 2181 } 2182 2183 /* 2184 * parse event config string, return a list of event terms. 2185 */ 2186 int parse_events_terms(struct list_head *terms, const char *str) 2187 { 2188 struct parse_events_state parse_state = { 2189 .terms = NULL, 2190 .stoken = PE_START_TERMS, 2191 }; 2192 int ret; 2193 2194 ret = parse_events__scanner(str, &parse_state); 2195 perf_pmu__parse_cleanup(); 2196 2197 if (!ret) { 2198 list_splice(parse_state.terms, terms); 2199 zfree(&parse_state.terms); 2200 return 0; 2201 } 2202 2203 parse_events_terms__delete(parse_state.terms); 2204 return ret; 2205 } 2206 2207 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 2208 const char *str, char *pmu_name, 2209 struct list_head *list) 2210 { 2211 struct parse_events_state ps = { 2212 .list = LIST_HEAD_INIT(ps.list), 2213 .stoken = PE_START_EVENTS, 2214 .hybrid_pmu_name = pmu_name, 2215 .idx = parse_state->idx, 2216 }; 2217 int ret; 2218 2219 ret = parse_events__scanner(str, &ps); 2220 perf_pmu__parse_cleanup(); 2221 2222 if (!ret) { 2223 if (!list_empty(&ps.list)) { 2224 list_splice(&ps.list, list); 2225 parse_state->idx = ps.idx; 2226 return 0; 2227 } else 2228 return -1; 2229 } 2230 2231 return ret; 2232 } 2233 2234 int __parse_events(struct evlist *evlist, const char *str, 2235 struct parse_events_error *err, struct perf_pmu *fake_pmu) 2236 { 2237 struct parse_events_state parse_state = { 2238 .list = LIST_HEAD_INIT(parse_state.list), 2239 .idx = evlist->core.nr_entries, 2240 .error = err, 2241 .evlist = evlist, 2242 .stoken = PE_START_EVENTS, 2243 .fake_pmu = fake_pmu, 2244 }; 2245 int ret; 2246 2247 ret = parse_events__scanner(str, &parse_state); 2248 perf_pmu__parse_cleanup(); 2249 2250 if (!ret && list_empty(&parse_state.list)) { 2251 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2252 return -1; 2253 } 2254 2255 /* 2256 * Add list to the evlist even with errors to allow callers to clean up. 2257 */ 2258 evlist__splice_list_tail(evlist, &parse_state.list); 2259 2260 if (!ret) { 2261 struct evsel *last; 2262 2263 evlist->core.nr_groups += parse_state.nr_groups; 2264 last = evlist__last(evlist); 2265 last->cmdline_group_boundary = true; 2266 2267 return 0; 2268 } 2269 2270 /* 2271 * There are 2 users - builtin-record and builtin-test objects. 2272 * Both call evlist__delete in case of error, so we dont 2273 * need to bother. 2274 */ 2275 return ret; 2276 } 2277 2278 int parse_event(struct evlist *evlist, const char *str) 2279 { 2280 struct parse_events_error err; 2281 int ret; 2282 2283 parse_events_error__init(&err); 2284 ret = parse_events(evlist, str, &err); 2285 parse_events_error__exit(&err); 2286 return ret; 2287 } 2288 2289 void parse_events_error__init(struct parse_events_error *err) 2290 { 2291 bzero(err, sizeof(*err)); 2292 } 2293 2294 void parse_events_error__exit(struct parse_events_error *err) 2295 { 2296 zfree(&err->str); 2297 zfree(&err->help); 2298 zfree(&err->first_str); 2299 zfree(&err->first_help); 2300 } 2301 2302 void parse_events_error__handle(struct parse_events_error *err, int idx, 2303 char *str, char *help) 2304 { 2305 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2306 goto out_free; 2307 switch (err->num_errors) { 2308 case 0: 2309 err->idx = idx; 2310 err->str = str; 2311 err->help = help; 2312 break; 2313 case 1: 2314 err->first_idx = err->idx; 2315 err->idx = idx; 2316 err->first_str = err->str; 2317 err->str = str; 2318 err->first_help = err->help; 2319 err->help = help; 2320 break; 2321 default: 2322 pr_debug("Multiple errors dropping message: %s (%s)\n", 2323 err->str, err->help); 2324 free(err->str); 2325 err->str = str; 2326 free(err->help); 2327 err->help = help; 2328 break; 2329 } 2330 err->num_errors++; 2331 return; 2332 2333 out_free: 2334 free(str); 2335 free(help); 2336 } 2337 2338 #define MAX_WIDTH 1000 2339 static int get_term_width(void) 2340 { 2341 struct winsize ws; 2342 2343 get_term_dimensions(&ws); 2344 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2345 } 2346 2347 static void __parse_events_error__print(int err_idx, const char *err_str, 2348 const char *err_help, const char *event) 2349 { 2350 const char *str = "invalid or unsupported event: "; 2351 char _buf[MAX_WIDTH]; 2352 char *buf = (char *) event; 2353 int idx = 0; 2354 if (err_str) { 2355 /* -2 for extra '' in the final fprintf */ 2356 int width = get_term_width() - 2; 2357 int len_event = strlen(event); 2358 int len_str, max_len, cut = 0; 2359 2360 /* 2361 * Maximum error index indent, we will cut 2362 * the event string if it's bigger. 2363 */ 2364 int max_err_idx = 13; 2365 2366 /* 2367 * Let's be specific with the message when 2368 * we have the precise error. 2369 */ 2370 str = "event syntax error: "; 2371 len_str = strlen(str); 2372 max_len = width - len_str; 2373 2374 buf = _buf; 2375 2376 /* We're cutting from the beginning. */ 2377 if (err_idx > max_err_idx) 2378 cut = err_idx - max_err_idx; 2379 2380 strncpy(buf, event + cut, max_len); 2381 2382 /* Mark cut parts with '..' on both sides. */ 2383 if (cut) 2384 buf[0] = buf[1] = '.'; 2385 2386 if ((len_event - cut) > max_len) { 2387 buf[max_len - 1] = buf[max_len - 2] = '.'; 2388 buf[max_len] = 0; 2389 } 2390 2391 idx = len_str + err_idx - cut; 2392 } 2393 2394 fprintf(stderr, "%s'%s'\n", str, buf); 2395 if (idx) { 2396 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2397 if (err_help) 2398 fprintf(stderr, "\n%s\n", err_help); 2399 } 2400 } 2401 2402 void parse_events_error__print(struct parse_events_error *err, 2403 const char *event) 2404 { 2405 if (!err->num_errors) 2406 return; 2407 2408 __parse_events_error__print(err->idx, err->str, err->help, event); 2409 2410 if (err->num_errors > 1) { 2411 fputs("\nInitial error:\n", stderr); 2412 __parse_events_error__print(err->first_idx, err->first_str, 2413 err->first_help, event); 2414 } 2415 } 2416 2417 #undef MAX_WIDTH 2418 2419 int parse_events_option(const struct option *opt, const char *str, 2420 int unset __maybe_unused) 2421 { 2422 struct evlist *evlist = *(struct evlist **)opt->value; 2423 struct parse_events_error err; 2424 int ret; 2425 2426 parse_events_error__init(&err); 2427 ret = parse_events(evlist, str, &err); 2428 2429 if (ret) { 2430 parse_events_error__print(&err, str); 2431 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2432 } 2433 parse_events_error__exit(&err); 2434 2435 return ret; 2436 } 2437 2438 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2439 { 2440 struct evlist **evlistp = opt->value; 2441 int ret; 2442 2443 if (*evlistp == NULL) { 2444 *evlistp = evlist__new(); 2445 2446 if (*evlistp == NULL) { 2447 fprintf(stderr, "Not enough memory to create evlist\n"); 2448 return -1; 2449 } 2450 } 2451 2452 ret = parse_events_option(opt, str, unset); 2453 if (ret) { 2454 evlist__delete(*evlistp); 2455 *evlistp = NULL; 2456 } 2457 2458 return ret; 2459 } 2460 2461 static int 2462 foreach_evsel_in_last_glob(struct evlist *evlist, 2463 int (*func)(struct evsel *evsel, 2464 const void *arg), 2465 const void *arg) 2466 { 2467 struct evsel *last = NULL; 2468 int err; 2469 2470 /* 2471 * Don't return when list_empty, give func a chance to report 2472 * error when it found last == NULL. 2473 * 2474 * So no need to WARN here, let *func do this. 2475 */ 2476 if (evlist->core.nr_entries > 0) 2477 last = evlist__last(evlist); 2478 2479 do { 2480 err = (*func)(last, arg); 2481 if (err) 2482 return -1; 2483 if (!last) 2484 return 0; 2485 2486 if (last->core.node.prev == &evlist->core.entries) 2487 return 0; 2488 last = list_entry(last->core.node.prev, struct evsel, core.node); 2489 } while (!last->cmdline_group_boundary); 2490 2491 return 0; 2492 } 2493 2494 static int set_filter(struct evsel *evsel, const void *arg) 2495 { 2496 const char *str = arg; 2497 bool found = false; 2498 int nr_addr_filters = 0; 2499 struct perf_pmu *pmu = NULL; 2500 2501 if (evsel == NULL) { 2502 fprintf(stderr, 2503 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2504 return -1; 2505 } 2506 2507 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2508 if (evsel__append_tp_filter(evsel, str) < 0) { 2509 fprintf(stderr, 2510 "not enough memory to hold filter string\n"); 2511 return -1; 2512 } 2513 2514 return 0; 2515 } 2516 2517 while ((pmu = perf_pmu__scan(pmu)) != NULL) 2518 if (pmu->type == evsel->core.attr.type) { 2519 found = true; 2520 break; 2521 } 2522 2523 if (found) 2524 perf_pmu__scan_file(pmu, "nr_addr_filters", 2525 "%d", &nr_addr_filters); 2526 2527 if (!nr_addr_filters) { 2528 fprintf(stderr, 2529 "This CPU does not support address filtering\n"); 2530 return -1; 2531 } 2532 2533 if (evsel__append_addr_filter(evsel, str) < 0) { 2534 fprintf(stderr, 2535 "not enough memory to hold filter string\n"); 2536 return -1; 2537 } 2538 2539 return 0; 2540 } 2541 2542 int parse_filter(const struct option *opt, const char *str, 2543 int unset __maybe_unused) 2544 { 2545 struct evlist *evlist = *(struct evlist **)opt->value; 2546 2547 return foreach_evsel_in_last_glob(evlist, set_filter, 2548 (const void *)str); 2549 } 2550 2551 static int add_exclude_perf_filter(struct evsel *evsel, 2552 const void *arg __maybe_unused) 2553 { 2554 char new_filter[64]; 2555 2556 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2557 fprintf(stderr, 2558 "--exclude-perf option should follow a -e tracepoint option\n"); 2559 return -1; 2560 } 2561 2562 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2563 2564 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2565 fprintf(stderr, 2566 "not enough memory to hold filter string\n"); 2567 return -1; 2568 } 2569 2570 return 0; 2571 } 2572 2573 int exclude_perf(const struct option *opt, 2574 const char *arg __maybe_unused, 2575 int unset __maybe_unused) 2576 { 2577 struct evlist *evlist = *(struct evlist **)opt->value; 2578 2579 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2580 NULL); 2581 } 2582 2583 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2584 { 2585 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2586 } 2587 2588 static int new_term(struct parse_events_term **_term, 2589 struct parse_events_term *temp, 2590 char *str, u64 num) 2591 { 2592 struct parse_events_term *term; 2593 2594 term = malloc(sizeof(*term)); 2595 if (!term) 2596 return -ENOMEM; 2597 2598 *term = *temp; 2599 INIT_LIST_HEAD(&term->list); 2600 term->weak = false; 2601 2602 switch (term->type_val) { 2603 case PARSE_EVENTS__TERM_TYPE_NUM: 2604 term->val.num = num; 2605 break; 2606 case PARSE_EVENTS__TERM_TYPE_STR: 2607 term->val.str = str; 2608 break; 2609 default: 2610 free(term); 2611 return -EINVAL; 2612 } 2613 2614 *_term = term; 2615 return 0; 2616 } 2617 2618 int parse_events_term__num(struct parse_events_term **term, 2619 int type_term, char *config, u64 num, 2620 bool no_value, 2621 void *loc_term_, void *loc_val_) 2622 { 2623 YYLTYPE *loc_term = loc_term_; 2624 YYLTYPE *loc_val = loc_val_; 2625 2626 struct parse_events_term temp = { 2627 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2628 .type_term = type_term, 2629 .config = config ? : strdup(config_term_names[type_term]), 2630 .no_value = no_value, 2631 .err_term = loc_term ? loc_term->first_column : 0, 2632 .err_val = loc_val ? loc_val->first_column : 0, 2633 }; 2634 2635 return new_term(term, &temp, NULL, num); 2636 } 2637 2638 int parse_events_term__str(struct parse_events_term **term, 2639 int type_term, char *config, char *str, 2640 void *loc_term_, void *loc_val_) 2641 { 2642 YYLTYPE *loc_term = loc_term_; 2643 YYLTYPE *loc_val = loc_val_; 2644 2645 struct parse_events_term temp = { 2646 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2647 .type_term = type_term, 2648 .config = config, 2649 .err_term = loc_term ? loc_term->first_column : 0, 2650 .err_val = loc_val ? loc_val->first_column : 0, 2651 }; 2652 2653 return new_term(term, &temp, str, 0); 2654 } 2655 2656 int parse_events_term__sym_hw(struct parse_events_term **term, 2657 char *config, unsigned idx) 2658 { 2659 struct event_symbol *sym; 2660 char *str; 2661 struct parse_events_term temp = { 2662 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2663 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 2664 .config = config, 2665 }; 2666 2667 if (!temp.config) { 2668 temp.config = strdup("event"); 2669 if (!temp.config) 2670 return -ENOMEM; 2671 } 2672 BUG_ON(idx >= PERF_COUNT_HW_MAX); 2673 sym = &event_symbols_hw[idx]; 2674 2675 str = strdup(sym->symbol); 2676 if (!str) 2677 return -ENOMEM; 2678 return new_term(term, &temp, str, 0); 2679 } 2680 2681 int parse_events_term__clone(struct parse_events_term **new, 2682 struct parse_events_term *term) 2683 { 2684 char *str; 2685 struct parse_events_term temp = { 2686 .type_val = term->type_val, 2687 .type_term = term->type_term, 2688 .config = NULL, 2689 .err_term = term->err_term, 2690 .err_val = term->err_val, 2691 }; 2692 2693 if (term->config) { 2694 temp.config = strdup(term->config); 2695 if (!temp.config) 2696 return -ENOMEM; 2697 } 2698 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2699 return new_term(new, &temp, NULL, term->val.num); 2700 2701 str = strdup(term->val.str); 2702 if (!str) 2703 return -ENOMEM; 2704 return new_term(new, &temp, str, 0); 2705 } 2706 2707 void parse_events_term__delete(struct parse_events_term *term) 2708 { 2709 if (term->array.nr_ranges) 2710 zfree(&term->array.ranges); 2711 2712 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2713 zfree(&term->val.str); 2714 2715 zfree(&term->config); 2716 free(term); 2717 } 2718 2719 int parse_events_copy_term_list(struct list_head *old, 2720 struct list_head **new) 2721 { 2722 struct parse_events_term *term, *n; 2723 int ret; 2724 2725 if (!old) { 2726 *new = NULL; 2727 return 0; 2728 } 2729 2730 *new = malloc(sizeof(struct list_head)); 2731 if (!*new) 2732 return -ENOMEM; 2733 INIT_LIST_HEAD(*new); 2734 2735 list_for_each_entry (term, old, list) { 2736 ret = parse_events_term__clone(&n, term); 2737 if (ret) 2738 return ret; 2739 list_add_tail(&n->list, *new); 2740 } 2741 return 0; 2742 } 2743 2744 void parse_events_terms__purge(struct list_head *terms) 2745 { 2746 struct parse_events_term *term, *h; 2747 2748 list_for_each_entry_safe(term, h, terms, list) { 2749 list_del_init(&term->list); 2750 parse_events_term__delete(term); 2751 } 2752 } 2753 2754 void parse_events_terms__delete(struct list_head *terms) 2755 { 2756 if (!terms) 2757 return; 2758 parse_events_terms__purge(terms); 2759 free(terms); 2760 } 2761 2762 void parse_events__clear_array(struct parse_events_array *a) 2763 { 2764 zfree(&a->ranges); 2765 } 2766 2767 void parse_events_evlist_error(struct parse_events_state *parse_state, 2768 int idx, const char *str) 2769 { 2770 if (!parse_state->error) 2771 return; 2772 2773 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL); 2774 } 2775 2776 static void config_terms_list(char *buf, size_t buf_sz) 2777 { 2778 int i; 2779 bool first = true; 2780 2781 buf[0] = '\0'; 2782 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2783 const char *name = config_term_names[i]; 2784 2785 if (!config_term_avail(i, NULL)) 2786 continue; 2787 if (!name) 2788 continue; 2789 if (name[0] == '<') 2790 continue; 2791 2792 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2793 return; 2794 2795 if (!first) 2796 strcat(buf, ","); 2797 else 2798 first = false; 2799 strcat(buf, name); 2800 } 2801 } 2802 2803 /* 2804 * Return string contains valid config terms of an event. 2805 * @additional_terms: For terms such as PMU sysfs terms. 2806 */ 2807 char *parse_events_formats_error_string(char *additional_terms) 2808 { 2809 char *str; 2810 /* "no-overwrite" is the longest name */ 2811 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2812 (sizeof("no-overwrite") - 1)]; 2813 2814 config_terms_list(static_terms, sizeof(static_terms)); 2815 /* valid terms */ 2816 if (additional_terms) { 2817 if (asprintf(&str, "valid terms: %s,%s", 2818 additional_terms, static_terms) < 0) 2819 goto fail; 2820 } else { 2821 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2822 goto fail; 2823 } 2824 return str; 2825 2826 fail: 2827 return NULL; 2828 } 2829 2830 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx, 2831 struct perf_event_attr *attr, 2832 const char *name, 2833 const char *metric_id, 2834 struct perf_pmu *pmu, 2835 struct list_head *config_terms) 2836 { 2837 return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id, 2838 pmu, config_terms, /*auto_merge_stats=*/false, 2839 /*cpu_list=*/NULL); 2840 } 2841