// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strlist.h"
#include "bpf-loader.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
#include "parse-events-flex.h"
#include "pmu.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "perf.h"
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "tracepoint.h"
#include "thread_map.h"

#define MAX_NAME_LEN 100

struct perf_pmu_event_symbol {
	char	*symbol;
	enum perf_pmu_event_symbol_type	type;
};

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused);
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list);

static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
 * The variable indicates the number of supported pmu event symbols.
 * 0 means not initialized and ready to init
 * -1 means failed to init, don't try anymore
 * >0 is the number of supported pmu event symbols
 */
static int perf_pmu_events_list_num;

struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};

bool is_event_supported(u8 type, u64 config)
{
	bool ret = true;
	int open_return;
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,
	};
	struct perf_thread_map *tmap = thread_map__new_by_tid(0);

	if (tmap == NULL)
		return false;

	evsel = evsel__new(&attr);
	if (evsel) {
		open_return = evsel__open(evsel, NULL, tmap);
		ret = open_return >= 0;

		if (open_return == -EACCES) {
			/*
			 * This happens if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2.
			 * Re-run with exclude_kernel set; we don't do that by
			 * default as some ARM machines do not support it.
			 */
			evsel->core.attr.exclude_kernel = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}
		evsel__delete(evsel);
	}

	perf_thread_map__put(tmap);
	return ret;
}

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(struct list_head *head_terms, int type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, head_terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    const char *cpu_list)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
			       cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

	if (pmu)
		perf_pmu__warn_invalid_formats(pmu);

	if (pmu && attr->type == PERF_TYPE_RAW)
		perf_pmu__warn_invalid_config(pmu, attr->config, name);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
			  enum perf_tool_event tool_event)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
	};

	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
			    /*metric_id=*/NULL, /*pmu=*/NULL,
			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
			    /*cpu_list=*/"0");
	if (!evsel)
		return -ENOMEM;
	evsel->tool_event = tool_event;
	if (tool_event == PERF_TOOL_DURATION_TIME
	    || tool_event == PERF_TOOL_USER_TIME
	    || tool_event == PERF_TOOL_SYSTEM_TIME) {
		free((char *)evsel->unit);
		evsel->unit = strdup("ns");
	}
	return 0;
}

static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(str, names[i][j], n))
				longest = n;
		}
		if (longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);
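/*
 * Worked example (illustrative, not part of the original source): parsing
 * the legacy cache event "L1-dcache-load-misses" below resolves to
 *
 *	cache_type   = PERF_COUNT_HW_CACHE_L1D		("L1-dcache")
 *	cache_op     = PERF_COUNT_HW_CACHE_OP_READ	("load")
 *	cache_result = PERF_COUNT_HW_CACHE_RESULT_MISS	("misses")
 *
 * which is then packed into the perf_event ABI encoding used below:
 *
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */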
int parse_events_add_cache(struct list_head *list, int *idx,
			   char *type, char *op_result1, char *op_result2,
			   struct parse_events_error *err,
			   struct list_head *head_config,
			   struct parse_events_state *parse_state)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	char name[MAX_NAME_LEN];
	const char *config_name, *metric_id;
	int cache_type = -1, cache_op = -1, cache_result = -1;
	char *op_result[2] = { op_result1, op_result2 };
	int i, n, ret;
	bool hybrid;

	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
	if (cache_type == -1)
		return -EINVAL;

	config_name = get_config_name(head_config);
	n = snprintf(name, MAX_NAME_LEN, "%s", type);

	for (i = 0; (i < 2) && (op_result[i]); i++) {
		char *str = op_result[i];

		n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);

		if (cache_op == -1) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	memset(&attr, 0, sizeof(attr));
	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr.type = PERF_TYPE_HW_CACHE;

	if (head_config) {
		if (config_attr(&attr, head_config, err,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_cache_hybrid(list, idx, &attr,
					     config_name ? : name,
					     metric_id,
					     &config_terms,
					     &hybrid, parse_state);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get the error directly from the syscall errno (> 0), or from an
	 * encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, 0, strdup(str), strdup(help));
}
static int add_tracepoint(struct list_head *list, int *idx,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct list_head *head_config)
{
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct list_head *list, int *idx,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct list_head *head_config)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
				     err, head_config);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}
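/*
 * Example (illustrative): "sched:sched_switch" adds a single tracepoint,
 * while a glob such as "sched:sched_*" goes through
 * add_tracepoint_multi_event() and adds one evsel per matching event found
 * in tracefs.
 */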
static int add_tracepoint_event(struct list_head *list, int *idx,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct list_head *head_config)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(list, idx, sys_name, evt_name,
					   err, head_config) :
		add_tracepoint(list, idx, sys_name, evt_name,
			       err, head_config);
}

static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct list_head *head_config)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(list, idx, events_ent->d_name,
					   evt_name, err, head_config);
	}

	closedir(events_dir);
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

#ifdef HAVE_LIBBPF_SUPPORT
struct __add_bpf_event_param {
	struct parse_events_state *parse_state;
	struct list_head *list;
	struct list_head *head_config;
};

static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
			 void *_param)
{
	LIST_HEAD(new_evsels);
	struct __add_bpf_event_param *param = _param;
	struct parse_events_state *parse_state = param->parse_state;
	struct list_head *list = param->list;
	struct evsel *pos;
	int err;
	/*
	 * Check if we should add the event, i.e. if it is a TP but starts
	 * with a '!'; in that case don't add the tracepoint, it will be used
	 * for something else, like adding to a BPF_MAP_TYPE_PROG_ARRAY.
	 *
	 * See tools/perf/examples/bpf/augmented_raw_syscalls.c
	 */
	if (group[0] == '!')
		return 0;

	pr_debug("add bpf event %s:%s and attach bpf program %d\n",
		 group, event, fd);

	err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
					  event, parse_state->error,
					  param->head_config);
	if (err) {
		struct evsel *evsel, *tmp;

		pr_debug("Failed to add BPF event %s:%s\n",
			 group, event);
		list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
			list_del_init(&evsel->core.node);
			evsel__delete(evsel);
		}
		return err;
	}
	pr_debug("adding %s:%s\n", group, event);

	list_for_each_entry(pos, &new_evsels, core.node) {
		pr_debug("adding %s:%s to %p\n",
			 group, event, pos);
		pos->bpf_fd = fd;
		pos->bpf_obj = obj;
	}
	list_splice(&new_evsels, list);
	return 0;
}

int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list,
			      struct bpf_object *obj,
			      struct list_head *head_config)
{
	int err;
	char errbuf[BUFSIZ];
	struct __add_bpf_event_param param = {parse_state, list, head_config};
	static bool registered_unprobe_atexit = false;

	if (IS_ERR(obj) || !obj) {
		snprintf(errbuf, sizeof(errbuf),
			 "Internal error: load bpf obj with NULL");
		err = -EINVAL;
		goto errout;
	}

	/*
	 * Register the atexit handler before calling bpf__probe() so that
	 * bpf__probe() doesn't need to unprobe the probe points it has
	 * already created on failure.
	 */
	if (!registered_unprobe_atexit) {
		atexit(bpf__clear);
		registered_unprobe_atexit = true;
	}

	err = bpf__probe(obj);
	if (err) {
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__load(obj);
	if (err) {
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__foreach_event(obj, add_bpf_event, &param);
	if (err) {
		snprintf(errbuf, sizeof(errbuf),
			 "Attach events in BPF object failed");
		goto errout;
	}

	return 0;
errout:
	parse_events_error__handle(parse_state->error, 0,
				   strdup(errbuf), strdup("(add -v to see detail)"));
	return err;
}

static int
parse_events_config_bpf(struct parse_events_state *parse_state,
			struct bpf_object *obj,
			struct list_head *head_config)
{
	struct parse_events_term *term;
	int error_pos;

	if (!head_config || list_empty(head_config))
		return 0;

	list_for_each_entry(term, head_config, list) {
		int err;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
			parse_events_error__handle(parse_state->error, term->err_term,
						strdup("Invalid config term for BPF object"),
						NULL);
			return -EINVAL;
		}

		err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
		if (err) {
			char errbuf[BUFSIZ];
			int idx;

			bpf__strerror_config_obj(obj, term, parse_state->evlist,
						 &error_pos, err, errbuf,
						 sizeof(errbuf));

			if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
				idx = term->err_val;
			else
				idx = term->err_term + error_pos;

			parse_events_error__handle(parse_state->error, idx,
						strdup(errbuf),
						strdup(
"Hint:\tValid config terms:\n"
"     \tmap:[<arraymap>].value<indices>=[value]\n"
"     \tmap:[<eventmap>].event<indices>=[event]\n"
"\n"
"     \twhere <indices> is something like [0,3...5] or [all]\n"
"     \t(add -v to see detail)"));
			return err;
		}
	}
	return 0;
}

/*
 * Split config terms:
 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
 *  'call-graph=fp' is 'evt config', and should be applied to each
 *  event in bpf.c.
 *  'map:array.value[0]=1' is 'obj config', and should be processed
 *  with parse_events_config_bpf.
 *
 * Move object config terms from the first list to obj_head_config.
 */
static void
split_bpf_config_terms(struct list_head *evt_head_config,
		       struct list_head *obj_head_config)
{
	struct parse_events_term *term, *temp;

	/*
	 * Currently, all possible user config terms belong to the BPF
	 * object. parse_events__is_hardcoded_term() happens to be a good
	 * flag.
	 *
	 * See parse_events_config_bpf() and
	 * config_term_tracepoint().
	 */
	list_for_each_entry_safe(term, temp, evt_head_config, list)
		if (!parse_events__is_hardcoded_term(term))
			list_move_tail(&term->list, obj_head_config);
}
int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list,
			  char *bpf_file_name,
			  bool source,
			  struct list_head *head_config)
{
	int err;
	struct bpf_object *obj;
	LIST_HEAD(obj_head_config);

	if (head_config)
		split_bpf_config_terms(head_config, &obj_head_config);

	obj = bpf__prepare_load(bpf_file_name, source);
	if (IS_ERR(obj)) {
		char errbuf[BUFSIZ];

		err = PTR_ERR(obj);

		if (err == -ENOTSUP)
			snprintf(errbuf, sizeof(errbuf),
				 "BPF support is not compiled");
		else
			bpf__strerror_prepare_load(bpf_file_name,
						   source,
						   -err, errbuf,
						   sizeof(errbuf));

		parse_events_error__handle(parse_state->error, 0,
					strdup(errbuf), strdup("(add -v to see detail)"));
		return err;
	}

	err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
	if (err)
		return err;
	err = parse_events_config_bpf(parse_state, obj, &obj_head_config);

	/*
	 * Caller doesn't know anything about obj_head_config,
	 * so combine them together again before returning.
	 */
	if (head_config)
		list_splice_tail(&obj_head_config, head_config);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list __maybe_unused,
			      struct bpf_object *obj __maybe_unused,
			      struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list __maybe_unused,
			  char *bpf_file_name __maybe_unused,
			  bool source __maybe_unused,
			  struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}
#endif // HAVE_LIBBPF_SUPPORT
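/*
 * Example (illustrative): a command line such as "-e mem:0x1000:rw" reaches
 * parse_events_add_breakpoint() with type = "rw", which
 * parse_breakpoint_type() turns into
 *
 *	attr.bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
 *
 * "x" maps to HW_BREAKPOINT_X, and repeating a flag (e.g. "rr") is
 * rejected with -EINVAL by CHECK_SET_TYPE below.
 */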
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct list_head *list, int *idx,
				u64 addr, char *type, u64 len)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = sizeof(long);
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
			 /*config_terms=*/NULL);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  int type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					type == PARSE_EVENTS__TERM_TYPE_NUM
					? strdup("expected numeric value")
					: strdup("expected string value"),
					NULL);
	}
	return -EINVAL;
}

/*
 * Update according to parse-events.l
 */
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
	[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
	[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
	[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
	[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
	[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
	[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
	[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
	[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
	[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
	[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
	[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
	[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
	[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
	[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
	[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
	[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
	[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
	[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
	[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
	[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
};

static bool config_term_shrinked;

static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		return true;
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     config_term_names[term_type]) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}
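/*
 * Illustrative example: for an event string like
 * "cpu/config=0x10,period=97,name=foo/", each term on the list is checked
 * below. "config" stores 0x10 into attr->config here, while "period" and
 * "name" are only type-checked at this point and picked up later by
 * get_config_terms() and get_config_name().
 */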
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						   \
do {									   \
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type))   \
		return -EINVAL;						   \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
					strdup("invalid branch sample type"),
					NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						strdup("too big"),
						NULL);
			return -EINVAL;
		}
		break;
	default:
		parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function, the user
	 * would see "'<sysfs term>' is not usable in 'perf stat'" when an
	 * invalid config term is provided for legacy events (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
		/*
		 * Always succeed for sysfs terms, as we don't know at this
		 * point what type they need to have.
		 */
		return 0;
	else
		return config_term_common(attr, term, err);
}
#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}
#endif

static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, head, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(&pmu->format, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(&pmu->format, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct list_head *list, int *idx,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct list_head *head_config)
{
#ifdef HAVE_LIBTRACEEVENT
	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(list, idx, sys, event,
						err, head_config);
	else
		return add_tracepoint_event(list, idx, sys, event,
					    err, head_config);
#else
	(void)list;
	(void)idx;
	(void)sys;
	(void)event;
	(void)head_config;
	parse_events_error__handle(err, 0, strdup("unsupported tracepoint"),
				strdup("libtraceevent is necessary for tracepoint support"));
	return -1;
#endif
}
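/*
 * Example (illustrative): a raw event such as "r1a8" arrives here as
 * type = PERF_TYPE_RAW, config = 0x1a8, and a legacy name such as
 * "cpu-cycles" as type = PERF_TYPE_HARDWARE,
 * config = PERF_COUNT_HW_CPU_CYCLES.
 */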
int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     struct list_head *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	bool hybrid;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
					       name, metric_id,
					       &config_terms, &hybrid);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_tool(struct parse_events_state *parse_state,
			  struct list_head *list,
			  int tool_event)
{
	return add_event_tool(list, &parse_state->idx, tool_event);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
					   struct list_head *list, char *name,
					   struct list_head *head_config)
{
	struct parse_events_term *term;
	int ret = -1;

	if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
	    !perf_pmu__is_hybrid(name)) {
		return -1;
	}

	/*
	 * More than one term in the list.
	 */
	if (head_config->next && head_config->next->next != head_config)
		return -1;

	term = list_first_entry(head_config, struct parse_events_term, list);
	if (term && term->config && strcmp(term->config, "event")) {
		ret = parse_events__with_hybrid_pmu(parse_state, term->config,
						    name, list);
	}

	return ret;
}

int parse_events_add_pmu(struct parse_events_state *parse_state,
			 struct list_head *list, char *name,
			 struct list_head *head_config,
			 bool auto_merge_stats,
			 bool use_alias)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	bool use_uncore_alias;
	LIST_HEAD(config_terms);

	pmu = parse_state->fake_pmu ?: perf_pmu__find(name);

	if (verbose > 1 && !(pmu && pmu->selectable)) {
		fprintf(stderr, "Attempting to add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	if (!pmu) {
		char *err_str;

		if (asprintf(&err_str,
			     "Cannot find PMU `%s'. Missing kernel support?",
			     name) >= 0)
			parse_events_error__handle(err, 0, err_str, NULL);
		return -EINVAL;
	}

	if (pmu->default_config) {
		memcpy(&attr, pmu->default_config,
		       sizeof(struct perf_event_attr));
	} else {
		memset(&attr, 0, sizeof(attr));
	}

	use_uncore_alias = (pmu->is_uncore && use_alias);

	if (!head_config) {
		attr.type = pmu->type;
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL);
		if (evsel) {
			evsel->pmu_name = name ? strdup(name) : NULL;
			evsel->use_uncore_alias = use_uncore_alias;
			return 0;
		} else {
			return -ENOMEM;
		}
	}

	if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
		return -EINVAL;

	if (verbose > 1) {
		fprintf(stderr, "After aliases, add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
		return -EINVAL;

	if (get_config_terms(head_config, &config_terms))
		return -ENOMEM;

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
		return -ENOMEM;

	if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
					     head_config)) {
		return 0;
	}

	if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
		free_config_terms(&config_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(head_config),
			    get_config_metric_id(head_config), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
	if (!evsel)
		return -ENOMEM;

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->pmu_name = name ? strdup(name) : NULL;
	evsel->use_uncore_alias = use_uncore_alias;
	evsel->percore = config_term_percore(&evsel->config_terms);

	if (parse_state->fake_pmu)
		return 0;

	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->metric_expr = info.metric_expr;
	evsel->metric_name = info.metric_name;
	return 0;
}
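/*
 * Illustrative example (the alias name is hypothetical): an uncore alias
 * such as "unc_cha_clockticks" that exists on uncore_cha_0 .. uncore_cha_N
 * is added once per matching PMU by the scan loop below, so a single name
 * on the command line can expand to several evsels.
 */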
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       char *str, struct list_head *head,
			       struct list_head **listp)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct list_head *orig_head = NULL;
	struct perf_pmu *pmu = NULL;
	int ok = 0;
	char *config;

	*listp = NULL;

	if (!head) {
		head = malloc(sizeof(struct list_head));
		if (!head)
			goto out_err;

		INIT_LIST_HEAD(head);
	}
	config = strdup(str);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, 1, false, &config,
				   NULL) < 0) {
		free(config);
		goto out_err;
	}
	list_add_tail(&term->list, head);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		struct perf_pmu_alias *alias;

		list_for_each_entry(alias, &pmu->aliases, list) {
			if (!strcasecmp(alias->name, str)) {
				parse_events_copy_term_list(head, &orig_head);
				if (!parse_events_add_pmu(parse_state, list,
							  pmu->name, orig_head,
							  true, true)) {
					pr_debug("%s -> %s/%s/\n", str,
						 pmu->name, alias->str);
					ok++;
				}
				parse_events_terms__delete(orig_head);
			}
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, str, head,
					  true, true)) {
			pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
			ok++;
		}
	}

out_err:
	if (ok)
		*listp = list;
	else
		free(list);

	parse_events_terms__delete(head);
	return ok ? 0 : -1;
}

int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}

/*
 * Check if the two uncore PMUs are from the same uncore block.
 * The format of the uncore PMU name is uncore_#blockname_#pmuidx.
 */
static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b)
{
	char *end_a, *end_b;

	end_a = strrchr(pmu_name_a, '_');
	end_b = strrchr(pmu_name_b, '_');

	if (!end_a || !end_b)
		return false;

	if ((end_a - pmu_name_a) != (end_b - pmu_name_b))
		return false;

	return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0);
}
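/*
 * Example (illustrative): is_same_uncore_block("uncore_cha_0",
 * "uncore_cha_2") is true (both are block "uncore_cha"), while
 * is_same_uncore_block("uncore_cha_0", "uncore_imc_0") is false.
 */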
static int
parse_events__set_leader_for_uncore_alias(char *name, struct list_head *list,
					  struct parse_events_state *parse_state)
{
	struct evsel *evsel, *leader;
	uintptr_t *leaders;
	bool is_leader = true;
	int i, nr_pmu = 0, total_members, ret = 0;

	leader = list_first_entry(list, struct evsel, core.node);
	evsel = list_last_entry(list, struct evsel, core.node);
	total_members = evsel->core.idx - leader->core.idx + 1;

	leaders = calloc(total_members, sizeof(uintptr_t));
	if (WARN_ON(!leaders))
		return 0;

	/*
	 * Go through the whole group and do a sanity check.
	 * All members must use an alias and be from the same uncore block.
	 * Also, store the leader events in an array.
	 */
	__evlist__for_each_entry(list, evsel) {

		/* Only split the uncore group whose members use an alias */
		if (!evsel->use_uncore_alias)
			goto out;

		/* The events must be from the same uncore block */
		if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name))
			goto out;

		if (!is_leader)
			continue;
		/*
		 * If the event's PMU name starts to repeat, it must be a new
		 * event. That can be used to distinguish the leader from
		 * other members, even if they have the same event name.
		 */
		if ((leader != evsel) &&
		    !strcmp(leader->pmu_name, evsel->pmu_name)) {
			is_leader = false;
			continue;
		}

		/* Store the leader event for each PMU */
		leaders[nr_pmu++] = (uintptr_t) evsel;
	}

	/* only one event alias */
	if (nr_pmu == total_members) {
		parse_state->nr_groups--;
		goto handled;
	}

	/*
	 * An uncore event alias is a joint name which means the same event
	 * runs on all PMUs of a block.
	 * Perf doesn't support mixed events from different PMUs in the same
	 * group. The big group has to be split into multiple small groups
	 * which only include the events from the same PMU.
	 *
	 * Here the uncore event aliases must be from the same uncore block.
	 * The number of PMUs must be the same for each alias. The number of
	 * new small groups equals the number of PMUs. Set the leader event
	 * for the corresponding members in each group.
	 */
	i = 0;
	__evlist__for_each_entry(list, evsel) {
		if (i >= nr_pmu)
			i = 0;
		evsel__set_leader(evsel, (struct evsel *) leaders[i++]);
	}

	/* The number of members and the group name are the same for each group */
	for (i = 0; i < nr_pmu; i++) {
		evsel = (struct evsel *) leaders[i];
		evsel->core.nr_members = total_members / nr_pmu;
		evsel->group_name = name ? strdup(name) : NULL;
	}

	/* Take the new small groups into account */
	parse_state->nr_groups += nr_pmu - 1;

handled:
	ret = 1;
out:
	free(leaders);
	return ret;
}

__weak struct evsel *arch_evlist__leader(struct list_head *list)
{
	return list_first_entry(list, struct evsel, core.node);
}

void parse_events__set_leader(char *name, struct list_head *list,
			      struct parse_events_state *parse_state)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	if (parse_events__set_leader_for_uncore_alias(name, list, parse_state))
		return;

	leader = arch_evlist__leader(list);
	__perf_evlist__set_leader(list, &leader->core);
	leader->group_name = name ? strdup(name) : NULL;
	list_move(&leader->core.node, list);
}

/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for single event definition. Update the
	 * 'all event' list, and reinit the 'single event'
	 * list, for next event definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}

struct event_modifier {
	int eu;
	int ek;
	int eh;
	int eH;
	int eG;
	int eI;
	int precise;
	int precise_max;
	int exclude_GH;
	int sample_read;
	int pinned;
	int weak;
	int exclusive;
	int bpf_counter;
};
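/*
 * Illustrative example: for "cycles:kppS" the loop below yields
 * exclude_user = 1 and exclude_hv = 1 (only the kernel is counted),
 * precise = 2, sample_read = 1 and, because precise requires it,
 * exclude_guest = 1. The trailing check rejects more than three 'p's.
 */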
static int get_event_modifier(struct event_modifier *mod, char *str,
			      struct evsel *evsel)
{
	int eu = evsel ? evsel->core.attr.exclude_user : 0;
	int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
	int eh = evsel ? evsel->core.attr.exclude_hv : 0;
	int eH = evsel ? evsel->core.attr.exclude_host : 0;
	int eG = evsel ? evsel->core.attr.exclude_guest : 0;
	int eI = evsel ? evsel->core.attr.exclude_idle : 0;
	int precise = evsel ? evsel->core.attr.precise_ip : 0;
	int precise_max = 0;
	int sample_read = 0;
	int pinned = evsel ? evsel->core.attr.pinned : 0;
	int exclusive = evsel ? evsel->core.attr.exclusive : 0;

	int exclude = eu | ek | eh;
	int exclude_GH = evsel ? evsel->exclude_GH : 0;
	int weak = 0;
	int bpf_counter = 0;

	memset(mod, 0, sizeof(*mod));

	while (*str) {
		if (*str == 'u') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest)
				eG = 1;
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'G') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		} else if (*str == 'H') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		} else if (*str == 'I') {
			eI = 1;
		} else if (*str == 'p') {
			precise++;
			/* use of precise requires exclude_guest */
			if (!exclude_GH)
				eG = 1;
		} else if (*str == 'P') {
			precise_max = 1;
		} else if (*str == 'S') {
			sample_read = 1;
		} else if (*str == 'D') {
			pinned = 1;
		} else if (*str == 'e') {
			exclusive = 1;
		} else if (*str == 'W') {
			weak = 1;
		} else if (*str == 'b') {
			bpf_counter = 1;
		} else
			break;

		++str;
	}

	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 * See also PERF_RECORD_MISC_EXACT_IP
	 */
	if (precise > 3)
		return -EINVAL;

	mod->eu = eu;
	mod->ek = ek;
	mod->eh = eh;
	mod->eH = eH;
	mod->eG = eG;
	mod->eI = eI;
	mod->precise = precise;
	mod->precise_max = precise_max;
	mod->exclude_GH = exclude_GH;
	mod->sample_read = sample_read;
	mod->pinned = pinned;
	mod->weak = weak;
	mod->bpf_counter = bpf_counter;
	mod->exclusive = exclusive;

	return 0;
}

/*
 * Basic sanity check to validate that a modifier string contains only one
 * instance of each modifier (apart from 'p').
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes the 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
		return -1;

	while (*p) {
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
		p++;
	}

	return 0;
}

int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
	struct evsel *evsel;
	struct event_modifier mod;

	if (str == NULL)
		return 0;

	if (check_modifier(str))
		return -EINVAL;

	if (!add && get_event_modifier(&mod, str, NULL))
		return -EINVAL;

	__evlist__for_each_entry(list, evsel) {
		if (add && get_event_modifier(&mod, str, evsel))
			return -EINVAL;

		evsel->core.attr.exclude_user   = mod.eu;
		evsel->core.attr.exclude_kernel = mod.ek;
		evsel->core.attr.exclude_hv     = mod.eh;
		evsel->core.attr.precise_ip     = mod.precise;
		evsel->core.attr.exclude_host   = mod.eH;
		evsel->core.attr.exclude_guest  = mod.eG;
		evsel->core.attr.exclude_idle   = mod.eI;
		evsel->exclude_GH               = mod.exclude_GH;
		evsel->sample_read              = mod.sample_read;
		evsel->precise_max              = mod.precise_max;
		evsel->weak_group               = mod.weak;
		evsel->bpf_counter              = mod.bpf_counter;

		if (evsel__is_group_leader(evsel)) {
			evsel->core.attr.pinned = mod.pinned;
			evsel->core.attr.exclusive = mod.exclusive;
		}
	}

	return 0;
}

int parse_events_name(struct list_head *list, const char *name)
{
	struct evsel *evsel;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name)
			evsel->name = strdup(name);
	}

	return 0;
}

static int
comp_pmu(const void *p1, const void *p2)
{
	struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
	struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;

	return strcasecmp(pmu1->symbol, pmu2->symbol);
}

static void perf_pmu__parse_cleanup(void)
{
	if (perf_pmu_events_list_num > 0) {
		struct perf_pmu_event_symbol *p;
		int i;

		for (i = 0; i < perf_pmu_events_list_num; i++) {
			p = perf_pmu_events_list + i;
			zfree(&p->symbol);
		}
		zfree(&perf_pmu_events_list);
		perf_pmu_events_list_num = 0;
	}
}

#define SET_SYMBOL(str, stype)		\
do {					\
	p->symbol = str;		\
	if (!p->symbol)			\
		goto err;		\
	p->type = stype;		\
} while (0)
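/*
 * Illustrative example (the alias name is hypothetical): a sysfs alias
 * named "cache-misses-total" is split on '-' below into the symbols
 * "cache" (PMU_EVENT_SYMBOL_PREFIX), "misses" (PMU_EVENT_SYMBOL_SUFFIX)
 * and "total" (PMU_EVENT_SYMBOL_SUFFIX2), so the lexer can recognize the
 * hyphenated spelling; a name without '-' becomes a single
 * PMU_EVENT_SYMBOL.
 */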
/*
 * Read the pmu events list from sysfs
 * Save it into perf_pmu_events_list
 */
static void perf_pmu__parse_init(void)
{
	struct perf_pmu *pmu = NULL;
	struct perf_pmu_alias *alias;
	int len = 0;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			char *tmp = strchr(alias->name, '-');

			if (tmp) {
				char *tmp2 = strchr(tmp + 1, '-');

				len++;
				if (tmp2)
					len++;
			}

			len++;
		}
	}

	if (len == 0) {
		perf_pmu_events_list_num = -1;
		return;
	}
	perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
	if (!perf_pmu_events_list)
		return;
	perf_pmu_events_list_num = len;

	len = 0;
	pmu = NULL;
	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
			char *tmp = strchr(alias->name, '-');
			char *tmp2 = NULL;

			if (tmp)
				tmp2 = strchr(tmp + 1, '-');
			if (tmp2) {
				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
						PMU_EVENT_SYMBOL_PREFIX);
				p++;
				tmp++;
				SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
				p++;
				SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
				len += 3;
			} else if (tmp) {
				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
						PMU_EVENT_SYMBOL_PREFIX);
				p++;
				SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
				len += 2;
			} else {
				SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
				len++;
			}
		}
	}
	qsort(perf_pmu_events_list, len,
	      sizeof(struct perf_pmu_event_symbol), comp_pmu);

	return;
err:
	perf_pmu__parse_cleanup();
}

/*
 * This function injects special terms into perf_pmu_events_list so the
 * test code can check this functionality.
 */
int perf_pmu__test_parse_init(void)
{
	struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
		{(char *)"read", PMU_EVENT_SYMBOL},
		{(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
		{(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
		{(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
		{(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
	};
	unsigned long i, j;

	tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
	if (!list)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
		tmp->type = symbols[i].type;
		tmp->symbol = strdup(symbols[i].symbol);
		if (!tmp->symbol)
			goto err_free;
	}

	perf_pmu_events_list = list;
	perf_pmu_events_list_num = ARRAY_SIZE(symbols);

	qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
	      sizeof(struct perf_pmu_event_symbol), comp_pmu);
	return 0;

err_free:
	for (j = 0, tmp = list; j < i; j++, tmp++)
		free(tmp->symbol);
	free(list);
	return -ENOMEM;
}

enum perf_pmu_event_symbol_type
perf_pmu__parse_check(const char *name)
{
	struct perf_pmu_event_symbol p, *r;

	/* scan kernel pmu events from sysfs if needed */
	if (perf_pmu_events_list_num == 0)
		perf_pmu__parse_init();
	/*
	 * The name "cpu" could be a prefix of cpu-cycles or a cpu// event.
	 * cpu-cycles is already handled as a hardcoded event, so "cpu" here
	 * must be a cpu// event, not a kernel PMU event.
	 */
	if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
		return PMU_EVENT_SYMBOL_ERR;

	p.symbol = strdup(name);
	if (!p.symbol)	/* don't hand comp_pmu() a NULL symbol */
		return PMU_EVENT_SYMBOL_ERR;
	r = bsearch(&p, perf_pmu_events_list,
		    (size_t) perf_pmu_events_list_num,
		    sizeof(struct perf_pmu_event_symbol), comp_pmu);
	zfree(&p.symbol);
	return r ? r->type : PMU_EVENT_SYMBOL_ERR;
}

static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
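/*
 * Example (illustrative): "config=0x1,name=foo" parses into two terms,
 * a NUM term for "config" and a STR term for "name".
 */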
int parse_events_terms(struct list_head *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret) {
		list_splice(parse_state.terms, terms);
		zfree(&parse_state.terms);
		return 0;
	}

	parse_events_terms__delete(parse_state.terms);
	return ret;
}

static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list)
{
	struct parse_events_state ps = {
		.list            = LIST_HEAD_INIT(ps.list),
		.stoken          = PE_START_EVENTS,
		.hybrid_pmu_name = pmu_name,
		.idx             = parse_state->idx,
	};
	int ret;

	ret = parse_events__scanner(str, &ps);
	perf_pmu__parse_cleanup();

	if (!ret) {
		if (!list_empty(&ps.list)) {
			list_splice(&ps.list, list);
			parse_state->idx = ps.idx;
			return 0;
		} else
			return -1;
	}

	return ret;
}

int __parse_events(struct evlist *evlist, const char *str,
		   struct parse_events_error *err, struct perf_pmu *fake_pmu)
{
	struct parse_events_state parse_state = {
		.list     = LIST_HEAD_INIT(parse_state.list),
		.idx      = evlist->core.nr_entries,
		.error    = err,
		.evlist   = evlist,
		.stoken   = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		evlist->core.nr_groups += parse_state.nr_groups;
		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users - the builtin-record and builtin-test objects.
	 * Both call evlist__delete() in case of error, so we don't need to
	 * bother here.
	 */
	return ret;
}
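/*
 * Convenience wrapper around parse_events() with self-contained error
 * handling, e.g. (illustrative) parse_event(evlist, "cycles:u").
 */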
int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

void parse_events_error__init(struct parse_events_error *err)
{
	bzero(err, sizeof(*err));
}

void parse_events_error__exit(struct parse_events_error *err)
{
	zfree(&err->str);
	zfree(&err->help);
	zfree(&err->first_str);
	zfree(&err->first_help);
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;
	switch (err->num_errors) {
	case 0:
		err->idx = idx;
		err->str = str;
		err->help = help;
		break;
	case 1:
		err->first_idx = err->idx;
		err->idx = idx;
		err->first_str = err->str;
		err->str = str;
		err->first_help = err->help;
		err->help = help;
		break;
	default:
		pr_debug("Multiple errors dropping message: %s (%s)\n",
			err->str, err->help);
		free(err->str);
		err->str = str;
		free(err->help);
		err->help = help;
		break;
	}
	err->num_errors++;
	return;

out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
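/*
 * Print the offending event string with a marker under the error position,
 * e.g. (illustrative):
 *
 *   event syntax error: 'cycles:x'
 *                               \___ parser error
 */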
static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for the quoting '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);
		/*
		 * strncpy() does not NUL-terminate when the source fills the
		 * buffer exactly, so terminate unconditionally.
		 */
		buf[max_len] = 0;

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len)
			buf[max_len - 1] = buf[max_len - 2] = '.';

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(struct parse_events_error *err,
			       const char *event)
{
	if (!err->num_errors)
		return;

	__parse_events_error__print(err->idx, err->str, err->help, event);

	if (err->num_errors > 1) {
		fputs("\nInitial error:\n", stderr);
		__parse_events_error__print(err->first_idx, err->first_str,
					    err->first_help, event);
	}
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct evlist **evlistp = opt->value;
	int ret;

	if (*evlistp == NULL) {
		*evlistp = evlist__new();

		if (*evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}

	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*evlistp);
		*evlistp = NULL;
	}

	return ret;
}
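/*
 * Apply @func to every evsel created by the last event glob on the command
 * line: walk backwards from the last evsel until a cmdline group boundary
 * is crossed.
 */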
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when the list is empty; give func a chance to report
	 * an error when it finds last == NULL, so there is no need to WARN
	 * here - let *func do it.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters) {
		fprintf(stderr,
			"This CPU does not support address filtering\n");
		return -1;
	}

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}
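/*
 * Constructors for parser terms: a term is either numeric ("period=1000")
 * or a string ("name=foo").  new_term() above copies the template and, for
 * string terms, takes ownership of @str (freed by parse_events_term__delete()).
 */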
int parse_events_term__num(struct parse_events_term **term,
			   int type_term, char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(config_term_names[type_term]),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   int type_term, char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, 0);
}

int parse_events_term__sym_hw(struct parse_events_term **term,
			      char *config, unsigned idx)
{
	struct event_symbol *sym;
	char *str;
	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
		.config = config,
	};

	if (!temp.config) {
		temp.config = strdup("event");
		if (!temp.config)
			return -ENOMEM;
	}
	BUG_ON(idx >= PERF_COUNT_HW_MAX);
	sym = &event_symbols_hw[idx];

	str = strdup(sym->symbol);
	if (!str)
		return -ENOMEM;
	return new_term(term, &temp, str, 0);
}

int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = {
		.type_val = term->type_val,
		.type_term = term->type_term,
		.config = NULL,
		.err_term = term->err_term,
		.err_val = term->err_val,
	};

	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str)
		return -ENOMEM;
	return new_term(new, &temp, str, 0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->array.nr_ranges)
		zfree(&term->array.ranges);

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

int parse_events_copy_term_list(struct list_head *old,
				struct list_head **new)
{
	struct parse_events_term *term, *n;
	int ret;

	if (!old) {
		*new = NULL;
		return 0;
	}

	*new = malloc(sizeof(struct list_head));
	if (!*new)
		return -ENOMEM;
	INIT_LIST_HEAD(*new);

	list_for_each_entry(term, old, list) {
		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;
		list_add_tail(&n->list, *new);
	}
	return 0;
}

void parse_events_terms__purge(struct list_head *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}
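/* Like parse_events_terms__purge(), but also frees the list head itself. */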
void parse_events_terms__delete(struct list_head *terms)
{
	if (!terms)
		return;
	parse_events_terms__purge(terms);
	free(terms);
}

void parse_events__clear_array(struct parse_events_array *a)
{
	zfree(&a->ranges);
}

void parse_events_evlist_error(struct parse_events_state *parse_state,
			       int idx, const char *str)
{
	if (!parse_state->error)
		return;

	parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
}

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = config_term_names[i];

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}

struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
					     struct perf_event_attr *attr,
					     const char *name,
					     const char *metric_id,
					     struct perf_pmu *pmu,
					     struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
			   pmu, config_terms, /*auto_merge_stats=*/false,
			   /*cpu_list=*/NULL);
}