1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/list_sort.h> 5 #include <linux/zalloc.h> 6 #include <dirent.h> 7 #include <errno.h> 8 #include <sys/ioctl.h> 9 #include <sys/param.h> 10 #include "term.h" 11 #include "evlist.h" 12 #include "evsel.h" 13 #include <subcmd/parse-options.h> 14 #include "parse-events.h" 15 #include "string2.h" 16 #include "strlist.h" 17 #include "bpf-loader.h" 18 #include "debug.h" 19 #include <api/fs/tracing_path.h> 20 #include <perf/cpumap.h> 21 #include "parse-events-bison.h" 22 #include "parse-events-flex.h" 23 #include "pmu.h" 24 #include "asm/bug.h" 25 #include "util/parse-branch-options.h" 26 #include "util/evsel_config.h" 27 #include "util/event.h" 28 #include "util/parse-events-hybrid.h" 29 #include "util/pmu-hybrid.h" 30 #include "util/bpf-filter.h" 31 #include "util/util.h" 32 #include "tracepoint.h" 33 #include "thread_map.h" 34 35 #define MAX_NAME_LEN 100 36 37 struct perf_pmu_event_symbol { 38 char *symbol; 39 enum perf_pmu_event_symbol_type type; 40 }; 41 42 #ifdef PARSER_DEBUG 43 extern int parse_events_debug; 44 #endif 45 int parse_events_parse(void *parse_state, void *scanner); 46 static int get_config_terms(struct list_head *head_config, 47 struct list_head *head_terms __maybe_unused); 48 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 49 const char *str, char *pmu_name, 50 struct list_head *list); 51 52 static struct perf_pmu_event_symbol *perf_pmu_events_list; 53 /* 54 * The variable indicates the number of supported pmu event symbols. 55 * 0 means not initialized and ready to init 56 * -1 means failed to init, don't try anymore 57 * >0 is the number of supported pmu event symbols 58 */ 59 static int perf_pmu_events_list_num; 60 61 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 62 [PERF_COUNT_HW_CPU_CYCLES] = { 63 .symbol = "cpu-cycles", 64 .alias = "cycles", 65 }, 66 [PERF_COUNT_HW_INSTRUCTIONS] = { 67 .symbol = "instructions", 68 .alias = "", 69 }, 70 [PERF_COUNT_HW_CACHE_REFERENCES] = { 71 .symbol = "cache-references", 72 .alias = "", 73 }, 74 [PERF_COUNT_HW_CACHE_MISSES] = { 75 .symbol = "cache-misses", 76 .alias = "", 77 }, 78 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 79 .symbol = "branch-instructions", 80 .alias = "branches", 81 }, 82 [PERF_COUNT_HW_BRANCH_MISSES] = { 83 .symbol = "branch-misses", 84 .alias = "", 85 }, 86 [PERF_COUNT_HW_BUS_CYCLES] = { 87 .symbol = "bus-cycles", 88 .alias = "", 89 }, 90 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 91 .symbol = "stalled-cycles-frontend", 92 .alias = "idle-cycles-frontend", 93 }, 94 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 95 .symbol = "stalled-cycles-backend", 96 .alias = "idle-cycles-backend", 97 }, 98 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 99 .symbol = "ref-cycles", 100 .alias = "", 101 }, 102 }; 103 104 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 105 [PERF_COUNT_SW_CPU_CLOCK] = { 106 .symbol = "cpu-clock", 107 .alias = "", 108 }, 109 [PERF_COUNT_SW_TASK_CLOCK] = { 110 .symbol = "task-clock", 111 .alias = "", 112 }, 113 [PERF_COUNT_SW_PAGE_FAULTS] = { 114 .symbol = "page-faults", 115 .alias = "faults", 116 }, 117 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 118 .symbol = "context-switches", 119 .alias = "cs", 120 }, 121 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 122 .symbol = "cpu-migrations", 123 .alias = "migrations", 124 }, 125 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 126 .symbol = "minor-faults", 127 .alias = "", 128 }, 129 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 130 .symbol = "major-faults", 131 .alias = "", 132 }, 133 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 134 .symbol = "alignment-faults", 135 .alias = "", 136 }, 137 [PERF_COUNT_SW_EMULATION_FAULTS] = { 138 .symbol = "emulation-faults", 139 .alias = "", 140 }, 141 [PERF_COUNT_SW_DUMMY] = { 142 .symbol = "dummy", 143 .alias = "", 144 }, 145 [PERF_COUNT_SW_BPF_OUTPUT] = { 146 .symbol = "bpf-output", 147 .alias = "", 148 }, 149 [PERF_COUNT_SW_CGROUP_SWITCHES] = { 150 .symbol = "cgroup-switches", 151 .alias = "", 152 }, 153 }; 154 155 bool is_event_supported(u8 type, u64 config) 156 { 157 bool ret = true; 158 int open_return; 159 struct evsel *evsel; 160 struct perf_event_attr attr = { 161 .type = type, 162 .config = config, 163 .disabled = 1, 164 }; 165 struct perf_thread_map *tmap = thread_map__new_by_tid(0); 166 167 if (tmap == NULL) 168 return false; 169 170 evsel = evsel__new(&attr); 171 if (evsel) { 172 open_return = evsel__open(evsel, NULL, tmap); 173 ret = open_return >= 0; 174 175 if (open_return == -EACCES) { 176 /* 177 * This happens if the paranoid value 178 * /proc/sys/kernel/perf_event_paranoid is set to 2 179 * Re-run with exclude_kernel set; we don't do that 180 * by default as some ARM machines do not support it. 181 * 182 */ 183 evsel->core.attr.exclude_kernel = 1; 184 ret = evsel__open(evsel, NULL, tmap) >= 0; 185 } 186 evsel__delete(evsel); 187 } 188 189 perf_thread_map__put(tmap); 190 return ret; 191 } 192 193 const char *event_type(int type) 194 { 195 switch (type) { 196 case PERF_TYPE_HARDWARE: 197 return "hardware"; 198 199 case PERF_TYPE_SOFTWARE: 200 return "software"; 201 202 case PERF_TYPE_TRACEPOINT: 203 return "tracepoint"; 204 205 case PERF_TYPE_HW_CACHE: 206 return "hardware-cache"; 207 208 default: 209 break; 210 } 211 212 return "unknown"; 213 } 214 215 static char *get_config_str(struct list_head *head_terms, int type_term) 216 { 217 struct parse_events_term *term; 218 219 if (!head_terms) 220 return NULL; 221 222 list_for_each_entry(term, head_terms, list) 223 if (term->type_term == type_term) 224 return term->val.str; 225 226 return NULL; 227 } 228 229 static char *get_config_metric_id(struct list_head *head_terms) 230 { 231 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 232 } 233 234 static char *get_config_name(struct list_head *head_terms) 235 { 236 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 237 } 238 239 static struct evsel * 240 __add_event(struct list_head *list, int *idx, 241 struct perf_event_attr *attr, 242 bool init_attr, 243 const char *name, const char *metric_id, struct perf_pmu *pmu, 244 struct list_head *config_terms, bool auto_merge_stats, 245 const char *cpu_list) 246 { 247 struct evsel *evsel; 248 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : 249 cpu_list ? perf_cpu_map__new(cpu_list) : NULL; 250 251 if (pmu) 252 perf_pmu__warn_invalid_formats(pmu); 253 254 if (pmu && attr->type == PERF_TYPE_RAW) 255 perf_pmu__warn_invalid_config(pmu, attr->config, name); 256 257 if (init_attr) 258 event_attr_init(attr); 259 260 evsel = evsel__new_idx(attr, *idx); 261 if (!evsel) { 262 perf_cpu_map__put(cpus); 263 return NULL; 264 } 265 266 (*idx)++; 267 evsel->core.cpus = cpus; 268 evsel->core.own_cpus = perf_cpu_map__get(cpus); 269 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 270 evsel->auto_merge_stats = auto_merge_stats; 271 evsel->pmu = pmu; 272 273 if (name) 274 evsel->name = strdup(name); 275 276 if (metric_id) 277 evsel->metric_id = strdup(metric_id); 278 279 if (config_terms) 280 list_splice_init(config_terms, &evsel->config_terms); 281 282 if (list) 283 list_add_tail(&evsel->core.node, list); 284 285 return evsel; 286 } 287 288 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 289 const char *name, const char *metric_id, 290 struct perf_pmu *pmu) 291 { 292 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 293 metric_id, pmu, /*config_terms=*/NULL, 294 /*auto_merge_stats=*/false, /*cpu_list=*/NULL); 295 } 296 297 static int add_event(struct list_head *list, int *idx, 298 struct perf_event_attr *attr, const char *name, 299 const char *metric_id, struct list_head *config_terms) 300 { 301 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 302 /*pmu=*/NULL, config_terms, 303 /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM; 304 } 305 306 static int add_event_tool(struct list_head *list, int *idx, 307 enum perf_tool_event tool_event) 308 { 309 struct evsel *evsel; 310 struct perf_event_attr attr = { 311 .type = PERF_TYPE_SOFTWARE, 312 .config = PERF_COUNT_SW_DUMMY, 313 }; 314 315 evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL, 316 /*metric_id=*/NULL, /*pmu=*/NULL, 317 /*config_terms=*/NULL, /*auto_merge_stats=*/false, 318 /*cpu_list=*/"0"); 319 if (!evsel) 320 return -ENOMEM; 321 evsel->tool_event = tool_event; 322 if (tool_event == PERF_TOOL_DURATION_TIME 323 || tool_event == PERF_TOOL_USER_TIME 324 || tool_event == PERF_TOOL_SYSTEM_TIME) { 325 free((char *)evsel->unit); 326 evsel->unit = strdup("ns"); 327 } 328 return 0; 329 } 330 331 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size) 332 { 333 int i, j; 334 int n, longest = -1; 335 336 for (i = 0; i < size; i++) { 337 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 338 n = strlen(names[i][j]); 339 if (n > longest && !strncasecmp(str, names[i][j], n)) 340 longest = n; 341 } 342 if (longest > 0) 343 return i; 344 } 345 346 return -1; 347 } 348 349 typedef int config_term_func_t(struct perf_event_attr *attr, 350 struct parse_events_term *term, 351 struct parse_events_error *err); 352 static int config_term_common(struct perf_event_attr *attr, 353 struct parse_events_term *term, 354 struct parse_events_error *err); 355 static int config_attr(struct perf_event_attr *attr, 356 struct list_head *head, 357 struct parse_events_error *err, 358 config_term_func_t config_term); 359 360 int parse_events_add_cache(struct list_head *list, int *idx, 361 char *type, char *op_result1, char *op_result2, 362 struct parse_events_error *err, 363 struct list_head *head_config, 364 struct parse_events_state *parse_state) 365 { 366 struct perf_event_attr attr; 367 LIST_HEAD(config_terms); 368 char name[MAX_NAME_LEN]; 369 const char *config_name, *metric_id; 370 int cache_type = -1, cache_op = -1, cache_result = -1; 371 char *op_result[2] = { op_result1, op_result2 }; 372 int i, n, ret; 373 bool hybrid; 374 375 /* 376 * No fallback - if we cannot get a clear cache type 377 * then bail out: 378 */ 379 cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX); 380 if (cache_type == -1) 381 return -EINVAL; 382 383 config_name = get_config_name(head_config); 384 n = snprintf(name, MAX_NAME_LEN, "%s", type); 385 386 for (i = 0; (i < 2) && (op_result[i]); i++) { 387 char *str = op_result[i]; 388 389 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); 390 391 if (cache_op == -1) { 392 cache_op = parse_aliases(str, evsel__hw_cache_op, 393 PERF_COUNT_HW_CACHE_OP_MAX); 394 if (cache_op >= 0) { 395 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 396 return -EINVAL; 397 continue; 398 } 399 } 400 401 if (cache_result == -1) { 402 cache_result = parse_aliases(str, evsel__hw_cache_result, 403 PERF_COUNT_HW_CACHE_RESULT_MAX); 404 if (cache_result >= 0) 405 continue; 406 } 407 } 408 409 /* 410 * Fall back to reads: 411 */ 412 if (cache_op == -1) 413 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 414 415 /* 416 * Fall back to accesses: 417 */ 418 if (cache_result == -1) 419 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 420 421 memset(&attr, 0, sizeof(attr)); 422 attr.config = cache_type | (cache_op << 8) | (cache_result << 16); 423 attr.type = PERF_TYPE_HW_CACHE; 424 425 if (head_config) { 426 if (config_attr(&attr, head_config, err, 427 config_term_common)) 428 return -EINVAL; 429 430 if (get_config_terms(head_config, &config_terms)) 431 return -ENOMEM; 432 } 433 434 metric_id = get_config_metric_id(head_config); 435 ret = parse_events__add_cache_hybrid(list, idx, &attr, 436 config_name ? : name, 437 metric_id, 438 &config_terms, 439 &hybrid, parse_state); 440 if (hybrid) 441 goto out_free_terms; 442 443 ret = add_event(list, idx, &attr, config_name ? : name, metric_id, 444 &config_terms); 445 out_free_terms: 446 free_config_terms(&config_terms); 447 return ret; 448 } 449 450 #ifdef HAVE_LIBTRACEEVENT 451 static void tracepoint_error(struct parse_events_error *e, int err, 452 const char *sys, const char *name) 453 { 454 const char *str; 455 char help[BUFSIZ]; 456 457 if (!e) 458 return; 459 460 /* 461 * We get error directly from syscall errno ( > 0), 462 * or from encoded pointer's error ( < 0). 463 */ 464 err = abs(err); 465 466 switch (err) { 467 case EACCES: 468 str = "can't access trace events"; 469 break; 470 case ENOENT: 471 str = "unknown tracepoint"; 472 break; 473 default: 474 str = "failed to add tracepoint"; 475 break; 476 } 477 478 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 479 parse_events_error__handle(e, 0, strdup(str), strdup(help)); 480 } 481 482 static int add_tracepoint(struct list_head *list, int *idx, 483 const char *sys_name, const char *evt_name, 484 struct parse_events_error *err, 485 struct list_head *head_config) 486 { 487 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++); 488 489 if (IS_ERR(evsel)) { 490 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name); 491 return PTR_ERR(evsel); 492 } 493 494 if (head_config) { 495 LIST_HEAD(config_terms); 496 497 if (get_config_terms(head_config, &config_terms)) 498 return -ENOMEM; 499 list_splice(&config_terms, &evsel->config_terms); 500 } 501 502 list_add_tail(&evsel->core.node, list); 503 return 0; 504 } 505 506 static int add_tracepoint_multi_event(struct list_head *list, int *idx, 507 const char *sys_name, const char *evt_name, 508 struct parse_events_error *err, 509 struct list_head *head_config) 510 { 511 char *evt_path; 512 struct dirent *evt_ent; 513 DIR *evt_dir; 514 int ret = 0, found = 0; 515 516 evt_path = get_events_file(sys_name); 517 if (!evt_path) { 518 tracepoint_error(err, errno, sys_name, evt_name); 519 return -1; 520 } 521 evt_dir = opendir(evt_path); 522 if (!evt_dir) { 523 put_events_file(evt_path); 524 tracepoint_error(err, errno, sys_name, evt_name); 525 return -1; 526 } 527 528 while (!ret && (evt_ent = readdir(evt_dir))) { 529 if (!strcmp(evt_ent->d_name, ".") 530 || !strcmp(evt_ent->d_name, "..") 531 || !strcmp(evt_ent->d_name, "enable") 532 || !strcmp(evt_ent->d_name, "filter")) 533 continue; 534 535 if (!strglobmatch(evt_ent->d_name, evt_name)) 536 continue; 537 538 found++; 539 540 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, 541 err, head_config); 542 } 543 544 if (!found) { 545 tracepoint_error(err, ENOENT, sys_name, evt_name); 546 ret = -1; 547 } 548 549 put_events_file(evt_path); 550 closedir(evt_dir); 551 return ret; 552 } 553 554 static int add_tracepoint_event(struct list_head *list, int *idx, 555 const char *sys_name, const char *evt_name, 556 struct parse_events_error *err, 557 struct list_head *head_config) 558 { 559 return strpbrk(evt_name, "*?") ? 560 add_tracepoint_multi_event(list, idx, sys_name, evt_name, 561 err, head_config) : 562 add_tracepoint(list, idx, sys_name, evt_name, 563 err, head_config); 564 } 565 566 static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 567 const char *sys_name, const char *evt_name, 568 struct parse_events_error *err, 569 struct list_head *head_config) 570 { 571 struct dirent *events_ent; 572 DIR *events_dir; 573 int ret = 0; 574 575 events_dir = tracing_events__opendir(); 576 if (!events_dir) { 577 tracepoint_error(err, errno, sys_name, evt_name); 578 return -1; 579 } 580 581 while (!ret && (events_ent = readdir(events_dir))) { 582 if (!strcmp(events_ent->d_name, ".") 583 || !strcmp(events_ent->d_name, "..") 584 || !strcmp(events_ent->d_name, "enable") 585 || !strcmp(events_ent->d_name, "header_event") 586 || !strcmp(events_ent->d_name, "header_page")) 587 continue; 588 589 if (!strglobmatch(events_ent->d_name, sys_name)) 590 continue; 591 592 ret = add_tracepoint_event(list, idx, events_ent->d_name, 593 evt_name, err, head_config); 594 } 595 596 closedir(events_dir); 597 return ret; 598 } 599 #endif /* HAVE_LIBTRACEEVENT */ 600 601 #ifdef HAVE_LIBBPF_SUPPORT 602 struct __add_bpf_event_param { 603 struct parse_events_state *parse_state; 604 struct list_head *list; 605 struct list_head *head_config; 606 }; 607 608 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj, 609 void *_param) 610 { 611 LIST_HEAD(new_evsels); 612 struct __add_bpf_event_param *param = _param; 613 struct parse_events_state *parse_state = param->parse_state; 614 struct list_head *list = param->list; 615 struct evsel *pos; 616 int err; 617 /* 618 * Check if we should add the event, i.e. if it is a TP but starts with a '!', 619 * then don't add the tracepoint, this will be used for something else, like 620 * adding to a BPF_MAP_TYPE_PROG_ARRAY. 621 * 622 * See tools/perf/examples/bpf/augmented_raw_syscalls.c 623 */ 624 if (group[0] == '!') 625 return 0; 626 627 pr_debug("add bpf event %s:%s and attach bpf program %d\n", 628 group, event, fd); 629 630 err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group, 631 event, parse_state->error, 632 param->head_config); 633 if (err) { 634 struct evsel *evsel, *tmp; 635 636 pr_debug("Failed to add BPF event %s:%s\n", 637 group, event); 638 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) { 639 list_del_init(&evsel->core.node); 640 evsel__delete(evsel); 641 } 642 return err; 643 } 644 pr_debug("adding %s:%s\n", group, event); 645 646 list_for_each_entry(pos, &new_evsels, core.node) { 647 pr_debug("adding %s:%s to %p\n", 648 group, event, pos); 649 pos->bpf_fd = fd; 650 pos->bpf_obj = obj; 651 } 652 list_splice(&new_evsels, list); 653 return 0; 654 } 655 656 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 657 struct list_head *list, 658 struct bpf_object *obj, 659 struct list_head *head_config) 660 { 661 int err; 662 char errbuf[BUFSIZ]; 663 struct __add_bpf_event_param param = {parse_state, list, head_config}; 664 static bool registered_unprobe_atexit = false; 665 666 if (IS_ERR(obj) || !obj) { 667 snprintf(errbuf, sizeof(errbuf), 668 "Internal error: load bpf obj with NULL"); 669 err = -EINVAL; 670 goto errout; 671 } 672 673 /* 674 * Register atexit handler before calling bpf__probe() so 675 * bpf__probe() don't need to unprobe probe points its already 676 * created when failure. 677 */ 678 if (!registered_unprobe_atexit) { 679 atexit(bpf__clear); 680 registered_unprobe_atexit = true; 681 } 682 683 err = bpf__probe(obj); 684 if (err) { 685 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf)); 686 goto errout; 687 } 688 689 err = bpf__load(obj); 690 if (err) { 691 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf)); 692 goto errout; 693 } 694 695 err = bpf__foreach_event(obj, add_bpf_event, ¶m); 696 if (err) { 697 snprintf(errbuf, sizeof(errbuf), 698 "Attach events in BPF object failed"); 699 goto errout; 700 } 701 702 return 0; 703 errout: 704 parse_events_error__handle(parse_state->error, 0, 705 strdup(errbuf), strdup("(add -v to see detail)")); 706 return err; 707 } 708 709 static int 710 parse_events_config_bpf(struct parse_events_state *parse_state, 711 struct bpf_object *obj, 712 struct list_head *head_config) 713 { 714 struct parse_events_term *term; 715 int error_pos; 716 717 if (!head_config || list_empty(head_config)) 718 return 0; 719 720 list_for_each_entry(term, head_config, list) { 721 int err; 722 723 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) { 724 parse_events_error__handle(parse_state->error, term->err_term, 725 strdup("Invalid config term for BPF object"), 726 NULL); 727 return -EINVAL; 728 } 729 730 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos); 731 if (err) { 732 char errbuf[BUFSIZ]; 733 int idx; 734 735 bpf__strerror_config_obj(obj, term, parse_state->evlist, 736 &error_pos, err, errbuf, 737 sizeof(errbuf)); 738 739 if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE) 740 idx = term->err_val; 741 else 742 idx = term->err_term + error_pos; 743 744 parse_events_error__handle(parse_state->error, idx, 745 strdup(errbuf), 746 strdup( 747 "Hint:\tValid config terms:\n" 748 " \tmap:[<arraymap>].value<indices>=[value]\n" 749 " \tmap:[<eventmap>].event<indices>=[event]\n" 750 "\n" 751 " \twhere <indices> is something like [0,3...5] or [all]\n" 752 " \t(add -v to see detail)")); 753 return err; 754 } 755 } 756 return 0; 757 } 758 759 /* 760 * Split config terms: 761 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ... 762 * 'call-graph=fp' is 'evt config', should be applied to each 763 * events in bpf.c. 764 * 'map:array.value[0]=1' is 'obj config', should be processed 765 * with parse_events_config_bpf. 766 * 767 * Move object config terms from the first list to obj_head_config. 768 */ 769 static void 770 split_bpf_config_terms(struct list_head *evt_head_config, 771 struct list_head *obj_head_config) 772 { 773 struct parse_events_term *term, *temp; 774 775 /* 776 * Currently, all possible user config term 777 * belong to bpf object. parse_events__is_hardcoded_term() 778 * happens to be a good flag. 779 * 780 * See parse_events_config_bpf() and 781 * config_term_tracepoint(). 782 */ 783 list_for_each_entry_safe(term, temp, evt_head_config, list) 784 if (!parse_events__is_hardcoded_term(term)) 785 list_move_tail(&term->list, obj_head_config); 786 } 787 788 int parse_events_load_bpf(struct parse_events_state *parse_state, 789 struct list_head *list, 790 char *bpf_file_name, 791 bool source, 792 struct list_head *head_config) 793 { 794 int err; 795 struct bpf_object *obj; 796 LIST_HEAD(obj_head_config); 797 798 if (head_config) 799 split_bpf_config_terms(head_config, &obj_head_config); 800 801 obj = bpf__prepare_load(bpf_file_name, source); 802 if (IS_ERR(obj)) { 803 char errbuf[BUFSIZ]; 804 805 err = PTR_ERR(obj); 806 807 if (err == -ENOTSUP) 808 snprintf(errbuf, sizeof(errbuf), 809 "BPF support is not compiled"); 810 else 811 bpf__strerror_prepare_load(bpf_file_name, 812 source, 813 -err, errbuf, 814 sizeof(errbuf)); 815 816 parse_events_error__handle(parse_state->error, 0, 817 strdup(errbuf), strdup("(add -v to see detail)")); 818 return err; 819 } 820 821 err = parse_events_load_bpf_obj(parse_state, list, obj, head_config); 822 if (err) 823 return err; 824 err = parse_events_config_bpf(parse_state, obj, &obj_head_config); 825 826 /* 827 * Caller doesn't know anything about obj_head_config, 828 * so combine them together again before returning. 829 */ 830 if (head_config) 831 list_splice_tail(&obj_head_config, head_config); 832 return err; 833 } 834 #else // HAVE_LIBBPF_SUPPORT 835 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 836 struct list_head *list __maybe_unused, 837 struct bpf_object *obj __maybe_unused, 838 struct list_head *head_config __maybe_unused) 839 { 840 parse_events_error__handle(parse_state->error, 0, 841 strdup("BPF support is not compiled"), 842 strdup("Make sure libbpf-devel is available at build time.")); 843 return -ENOTSUP; 844 } 845 846 int parse_events_load_bpf(struct parse_events_state *parse_state, 847 struct list_head *list __maybe_unused, 848 char *bpf_file_name __maybe_unused, 849 bool source __maybe_unused, 850 struct list_head *head_config __maybe_unused) 851 { 852 parse_events_error__handle(parse_state->error, 0, 853 strdup("BPF support is not compiled"), 854 strdup("Make sure libbpf-devel is available at build time.")); 855 return -ENOTSUP; 856 } 857 #endif // HAVE_LIBBPF_SUPPORT 858 859 static int 860 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 861 { 862 int i; 863 864 for (i = 0; i < 3; i++) { 865 if (!type || !type[i]) 866 break; 867 868 #define CHECK_SET_TYPE(bit) \ 869 do { \ 870 if (attr->bp_type & bit) \ 871 return -EINVAL; \ 872 else \ 873 attr->bp_type |= bit; \ 874 } while (0) 875 876 switch (type[i]) { 877 case 'r': 878 CHECK_SET_TYPE(HW_BREAKPOINT_R); 879 break; 880 case 'w': 881 CHECK_SET_TYPE(HW_BREAKPOINT_W); 882 break; 883 case 'x': 884 CHECK_SET_TYPE(HW_BREAKPOINT_X); 885 break; 886 default: 887 return -EINVAL; 888 } 889 } 890 891 #undef CHECK_SET_TYPE 892 893 if (!attr->bp_type) /* Default */ 894 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 895 896 return 0; 897 } 898 899 int parse_events_add_breakpoint(struct list_head *list, int *idx, 900 u64 addr, char *type, u64 len) 901 { 902 struct perf_event_attr attr; 903 904 memset(&attr, 0, sizeof(attr)); 905 attr.bp_addr = addr; 906 907 if (parse_breakpoint_type(type, &attr)) 908 return -EINVAL; 909 910 /* Provide some defaults if len is not specified */ 911 if (!len) { 912 if (attr.bp_type == HW_BREAKPOINT_X) 913 len = sizeof(long); 914 else 915 len = HW_BREAKPOINT_LEN_4; 916 } 917 918 attr.bp_len = len; 919 920 attr.type = PERF_TYPE_BREAKPOINT; 921 attr.sample_period = 1; 922 923 return add_event(list, idx, &attr, /*name=*/NULL, /*mertic_id=*/NULL, 924 /*config_terms=*/NULL); 925 } 926 927 static int check_type_val(struct parse_events_term *term, 928 struct parse_events_error *err, 929 int type) 930 { 931 if (type == term->type_val) 932 return 0; 933 934 if (err) { 935 parse_events_error__handle(err, term->err_val, 936 type == PARSE_EVENTS__TERM_TYPE_NUM 937 ? strdup("expected numeric value") 938 : strdup("expected string value"), 939 NULL); 940 } 941 return -EINVAL; 942 } 943 944 /* 945 * Update according to parse-events.l 946 */ 947 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 948 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 949 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 950 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 951 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 952 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3", 953 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 954 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 955 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 956 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 957 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 958 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 959 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 960 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 961 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 962 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 963 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 964 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 965 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 966 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 967 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 968 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 969 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 970 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 971 }; 972 973 static bool config_term_shrinked; 974 975 static bool 976 config_term_avail(int term_type, struct parse_events_error *err) 977 { 978 char *err_str; 979 980 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 981 parse_events_error__handle(err, -1, 982 strdup("Invalid term_type"), NULL); 983 return false; 984 } 985 if (!config_term_shrinked) 986 return true; 987 988 switch (term_type) { 989 case PARSE_EVENTS__TERM_TYPE_CONFIG: 990 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 991 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 992 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 993 case PARSE_EVENTS__TERM_TYPE_NAME: 994 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 995 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 996 case PARSE_EVENTS__TERM_TYPE_PERCORE: 997 return true; 998 default: 999 if (!err) 1000 return false; 1001 1002 /* term_type is validated so indexing is safe */ 1003 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 1004 config_term_names[term_type]) >= 0) 1005 parse_events_error__handle(err, -1, err_str, NULL); 1006 return false; 1007 } 1008 } 1009 1010 void parse_events__shrink_config_terms(void) 1011 { 1012 config_term_shrinked = true; 1013 } 1014 1015 static int config_term_common(struct perf_event_attr *attr, 1016 struct parse_events_term *term, 1017 struct parse_events_error *err) 1018 { 1019 #define CHECK_TYPE_VAL(type) \ 1020 do { \ 1021 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 1022 return -EINVAL; \ 1023 } while (0) 1024 1025 switch (term->type_term) { 1026 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1027 CHECK_TYPE_VAL(NUM); 1028 attr->config = term->val.num; 1029 break; 1030 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1031 CHECK_TYPE_VAL(NUM); 1032 attr->config1 = term->val.num; 1033 break; 1034 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1035 CHECK_TYPE_VAL(NUM); 1036 attr->config2 = term->val.num; 1037 break; 1038 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1039 CHECK_TYPE_VAL(NUM); 1040 attr->config3 = term->val.num; 1041 break; 1042 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1043 CHECK_TYPE_VAL(NUM); 1044 break; 1045 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1046 CHECK_TYPE_VAL(NUM); 1047 break; 1048 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1049 CHECK_TYPE_VAL(STR); 1050 if (strcmp(term->val.str, "no") && 1051 parse_branch_str(term->val.str, 1052 &attr->branch_sample_type)) { 1053 parse_events_error__handle(err, term->err_val, 1054 strdup("invalid branch sample type"), 1055 NULL); 1056 return -EINVAL; 1057 } 1058 break; 1059 case PARSE_EVENTS__TERM_TYPE_TIME: 1060 CHECK_TYPE_VAL(NUM); 1061 if (term->val.num > 1) { 1062 parse_events_error__handle(err, term->err_val, 1063 strdup("expected 0 or 1"), 1064 NULL); 1065 return -EINVAL; 1066 } 1067 break; 1068 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1069 CHECK_TYPE_VAL(STR); 1070 break; 1071 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1072 CHECK_TYPE_VAL(NUM); 1073 break; 1074 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1075 CHECK_TYPE_VAL(NUM); 1076 break; 1077 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1078 CHECK_TYPE_VAL(NUM); 1079 break; 1080 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1081 CHECK_TYPE_VAL(NUM); 1082 break; 1083 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1084 CHECK_TYPE_VAL(NUM); 1085 break; 1086 case PARSE_EVENTS__TERM_TYPE_NAME: 1087 CHECK_TYPE_VAL(STR); 1088 break; 1089 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1090 CHECK_TYPE_VAL(STR); 1091 break; 1092 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1093 CHECK_TYPE_VAL(NUM); 1094 break; 1095 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1096 CHECK_TYPE_VAL(NUM); 1097 break; 1098 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1099 CHECK_TYPE_VAL(NUM); 1100 if ((unsigned int)term->val.num > 1) { 1101 parse_events_error__handle(err, term->err_val, 1102 strdup("expected 0 or 1"), 1103 NULL); 1104 return -EINVAL; 1105 } 1106 break; 1107 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1108 CHECK_TYPE_VAL(NUM); 1109 break; 1110 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1111 CHECK_TYPE_VAL(NUM); 1112 if (term->val.num > UINT_MAX) { 1113 parse_events_error__handle(err, term->err_val, 1114 strdup("too big"), 1115 NULL); 1116 return -EINVAL; 1117 } 1118 break; 1119 default: 1120 parse_events_error__handle(err, term->err_term, 1121 strdup("unknown term"), 1122 parse_events_formats_error_string(NULL)); 1123 return -EINVAL; 1124 } 1125 1126 /* 1127 * Check term availability after basic checking so 1128 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 1129 * 1130 * If check availability at the entry of this function, 1131 * user will see "'<sysfs term>' is not usable in 'perf stat'" 1132 * if an invalid config term is provided for legacy events 1133 * (for example, instructions/badterm/...), which is confusing. 1134 */ 1135 if (!config_term_avail(term->type_term, err)) 1136 return -EINVAL; 1137 return 0; 1138 #undef CHECK_TYPE_VAL 1139 } 1140 1141 static int config_term_pmu(struct perf_event_attr *attr, 1142 struct parse_events_term *term, 1143 struct parse_events_error *err) 1144 { 1145 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1146 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) 1147 /* 1148 * Always succeed for sysfs terms, as we dont know 1149 * at this point what type they need to have. 1150 */ 1151 return 0; 1152 else 1153 return config_term_common(attr, term, err); 1154 } 1155 1156 #ifdef HAVE_LIBTRACEEVENT 1157 static int config_term_tracepoint(struct perf_event_attr *attr, 1158 struct parse_events_term *term, 1159 struct parse_events_error *err) 1160 { 1161 switch (term->type_term) { 1162 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1163 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1164 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1165 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1166 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1167 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1168 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1169 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1170 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1171 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1172 return config_term_common(attr, term, err); 1173 default: 1174 if (err) { 1175 parse_events_error__handle(err, term->err_term, 1176 strdup("unknown term"), 1177 strdup("valid terms: call-graph,stack-size\n")); 1178 } 1179 return -EINVAL; 1180 } 1181 1182 return 0; 1183 } 1184 #endif 1185 1186 static int config_attr(struct perf_event_attr *attr, 1187 struct list_head *head, 1188 struct parse_events_error *err, 1189 config_term_func_t config_term) 1190 { 1191 struct parse_events_term *term; 1192 1193 list_for_each_entry(term, head, list) 1194 if (config_term(attr, term, err)) 1195 return -EINVAL; 1196 1197 return 0; 1198 } 1199 1200 static int get_config_terms(struct list_head *head_config, 1201 struct list_head *head_terms __maybe_unused) 1202 { 1203 #define ADD_CONFIG_TERM(__type, __weak) \ 1204 struct evsel_config_term *__t; \ 1205 \ 1206 __t = zalloc(sizeof(*__t)); \ 1207 if (!__t) \ 1208 return -ENOMEM; \ 1209 \ 1210 INIT_LIST_HEAD(&__t->list); \ 1211 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1212 __t->weak = __weak; \ 1213 list_add_tail(&__t->list, head_terms) 1214 1215 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1216 do { \ 1217 ADD_CONFIG_TERM(__type, __weak); \ 1218 __t->val.__name = __val; \ 1219 } while (0) 1220 1221 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1222 do { \ 1223 ADD_CONFIG_TERM(__type, __weak); \ 1224 __t->val.str = strdup(__val); \ 1225 if (!__t->val.str) { \ 1226 zfree(&__t); \ 1227 return -ENOMEM; \ 1228 } \ 1229 __t->free_str = true; \ 1230 } while (0) 1231 1232 struct parse_events_term *term; 1233 1234 list_for_each_entry(term, head_config, list) { 1235 switch (term->type_term) { 1236 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1237 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1238 break; 1239 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1240 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); 1241 break; 1242 case PARSE_EVENTS__TERM_TYPE_TIME: 1243 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1244 break; 1245 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1246 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1247 break; 1248 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1249 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1250 break; 1251 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1252 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1253 term->val.num, term->weak); 1254 break; 1255 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1256 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1257 term->val.num ? 1 : 0, term->weak); 1258 break; 1259 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1260 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1261 term->val.num ? 0 : 1, term->weak); 1262 break; 1263 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1264 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1265 term->val.num, term->weak); 1266 break; 1267 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1268 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1269 term->val.num, term->weak); 1270 break; 1271 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1272 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1273 term->val.num ? 1 : 0, term->weak); 1274 break; 1275 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1276 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1277 term->val.num ? 0 : 1, term->weak); 1278 break; 1279 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1280 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1281 break; 1282 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1283 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1284 term->val.num ? true : false, term->weak); 1285 break; 1286 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1287 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1288 term->val.num ? 1 : 0, term->weak); 1289 break; 1290 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1291 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1292 term->val.num, term->weak); 1293 break; 1294 default: 1295 break; 1296 } 1297 } 1298 return 0; 1299 } 1300 1301 /* 1302 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1303 * each bit of attr->config that the user has changed. 1304 */ 1305 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config, 1306 struct list_head *head_terms) 1307 { 1308 struct parse_events_term *term; 1309 u64 bits = 0; 1310 int type; 1311 1312 list_for_each_entry(term, head_config, list) { 1313 switch (term->type_term) { 1314 case PARSE_EVENTS__TERM_TYPE_USER: 1315 type = perf_pmu__format_type(&pmu->format, term->config); 1316 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1317 continue; 1318 bits |= perf_pmu__format_bits(&pmu->format, term->config); 1319 break; 1320 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1321 bits = ~(u64)0; 1322 break; 1323 default: 1324 break; 1325 } 1326 } 1327 1328 if (bits) 1329 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1330 1331 #undef ADD_CONFIG_TERM 1332 return 0; 1333 } 1334 1335 int parse_events_add_tracepoint(struct list_head *list, int *idx, 1336 const char *sys, const char *event, 1337 struct parse_events_error *err, 1338 struct list_head *head_config) 1339 { 1340 #ifdef HAVE_LIBTRACEEVENT 1341 if (head_config) { 1342 struct perf_event_attr attr; 1343 1344 if (config_attr(&attr, head_config, err, 1345 config_term_tracepoint)) 1346 return -EINVAL; 1347 } 1348 1349 if (strpbrk(sys, "*?")) 1350 return add_tracepoint_multi_sys(list, idx, sys, event, 1351 err, head_config); 1352 else 1353 return add_tracepoint_event(list, idx, sys, event, 1354 err, head_config); 1355 #else 1356 (void)list; 1357 (void)idx; 1358 (void)sys; 1359 (void)event; 1360 (void)head_config; 1361 parse_events_error__handle(err, 0, strdup("unsupported tracepoint"), 1362 strdup("libtraceevent is necessary for tracepoint support")); 1363 return -1; 1364 #endif 1365 } 1366 1367 int parse_events_add_numeric(struct parse_events_state *parse_state, 1368 struct list_head *list, 1369 u32 type, u64 config, 1370 struct list_head *head_config) 1371 { 1372 struct perf_event_attr attr; 1373 LIST_HEAD(config_terms); 1374 const char *name, *metric_id; 1375 bool hybrid; 1376 int ret; 1377 1378 memset(&attr, 0, sizeof(attr)); 1379 attr.type = type; 1380 attr.config = config; 1381 1382 if (head_config) { 1383 if (config_attr(&attr, head_config, parse_state->error, 1384 config_term_common)) 1385 return -EINVAL; 1386 1387 if (get_config_terms(head_config, &config_terms)) 1388 return -ENOMEM; 1389 } 1390 1391 name = get_config_name(head_config); 1392 metric_id = get_config_metric_id(head_config); 1393 ret = parse_events__add_numeric_hybrid(parse_state, list, &attr, 1394 name, metric_id, 1395 &config_terms, &hybrid); 1396 if (hybrid) 1397 goto out_free_terms; 1398 1399 ret = add_event(list, &parse_state->idx, &attr, name, metric_id, 1400 &config_terms); 1401 out_free_terms: 1402 free_config_terms(&config_terms); 1403 return ret; 1404 } 1405 1406 int parse_events_add_tool(struct parse_events_state *parse_state, 1407 struct list_head *list, 1408 int tool_event) 1409 { 1410 return add_event_tool(list, &parse_state->idx, tool_event); 1411 } 1412 1413 static bool config_term_percore(struct list_head *config_terms) 1414 { 1415 struct evsel_config_term *term; 1416 1417 list_for_each_entry(term, config_terms, list) { 1418 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1419 return term->val.percore; 1420 } 1421 1422 return false; 1423 } 1424 1425 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state, 1426 struct list_head *list, char *name, 1427 struct list_head *head_config) 1428 { 1429 struct parse_events_term *term; 1430 int ret = -1; 1431 1432 if (parse_state->fake_pmu || !head_config || list_empty(head_config) || 1433 !perf_pmu__is_hybrid(name)) { 1434 return -1; 1435 } 1436 1437 /* 1438 * More than one term in list. 1439 */ 1440 if (head_config->next && head_config->next->next != head_config) 1441 return -1; 1442 1443 term = list_first_entry(head_config, struct parse_events_term, list); 1444 if (term && term->config && strcmp(term->config, "event")) { 1445 ret = parse_events__with_hybrid_pmu(parse_state, term->config, 1446 name, list); 1447 } 1448 1449 return ret; 1450 } 1451 1452 int parse_events_add_pmu(struct parse_events_state *parse_state, 1453 struct list_head *list, char *name, 1454 struct list_head *head_config, 1455 bool auto_merge_stats) 1456 { 1457 struct perf_event_attr attr; 1458 struct perf_pmu_info info; 1459 struct perf_pmu *pmu; 1460 struct evsel *evsel; 1461 struct parse_events_error *err = parse_state->error; 1462 LIST_HEAD(config_terms); 1463 1464 pmu = parse_state->fake_pmu ?: perf_pmu__find(name); 1465 1466 if (verbose > 1 && !(pmu && pmu->selectable)) { 1467 fprintf(stderr, "Attempting to add event pmu '%s' with '", 1468 name); 1469 if (head_config) { 1470 struct parse_events_term *term; 1471 1472 list_for_each_entry(term, head_config, list) { 1473 fprintf(stderr, "%s,", term->config); 1474 } 1475 } 1476 fprintf(stderr, "' that may result in non-fatal errors\n"); 1477 } 1478 1479 if (!pmu) { 1480 char *err_str; 1481 1482 if (asprintf(&err_str, 1483 "Cannot find PMU `%s'. Missing kernel support?", 1484 name) >= 0) 1485 parse_events_error__handle(err, 0, err_str, NULL); 1486 return -EINVAL; 1487 } 1488 1489 if (pmu->default_config) { 1490 memcpy(&attr, pmu->default_config, 1491 sizeof(struct perf_event_attr)); 1492 } else { 1493 memset(&attr, 0, sizeof(attr)); 1494 } 1495 1496 if (!head_config) { 1497 attr.type = pmu->type; 1498 evsel = __add_event(list, &parse_state->idx, &attr, 1499 /*init_attr=*/true, /*name=*/NULL, 1500 /*metric_id=*/NULL, pmu, 1501 /*config_terms=*/NULL, auto_merge_stats, 1502 /*cpu_list=*/NULL); 1503 if (evsel) { 1504 evsel->pmu_name = name ? strdup(name) : NULL; 1505 return 0; 1506 } else { 1507 return -ENOMEM; 1508 } 1509 } 1510 1511 if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info)) 1512 return -EINVAL; 1513 1514 if (verbose > 1) { 1515 fprintf(stderr, "After aliases, add event pmu '%s' with '", 1516 name); 1517 if (head_config) { 1518 struct parse_events_term *term; 1519 1520 list_for_each_entry(term, head_config, list) { 1521 fprintf(stderr, "%s,", term->config); 1522 } 1523 } 1524 fprintf(stderr, "' that may result in non-fatal errors\n"); 1525 } 1526 1527 /* 1528 * Configure hardcoded terms first, no need to check 1529 * return value when called with fail == 0 ;) 1530 */ 1531 if (config_attr(&attr, head_config, parse_state->error, config_term_pmu)) 1532 return -EINVAL; 1533 1534 if (get_config_terms(head_config, &config_terms)) 1535 return -ENOMEM; 1536 1537 /* 1538 * When using default config, record which bits of attr->config were 1539 * changed by the user. 1540 */ 1541 if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms)) 1542 return -ENOMEM; 1543 1544 if (!parse_events__inside_hybrid_pmu(parse_state, list, name, 1545 head_config)) { 1546 return 0; 1547 } 1548 1549 if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { 1550 free_config_terms(&config_terms); 1551 return -EINVAL; 1552 } 1553 1554 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1555 get_config_name(head_config), 1556 get_config_metric_id(head_config), pmu, 1557 &config_terms, auto_merge_stats, /*cpu_list=*/NULL); 1558 if (!evsel) 1559 return -ENOMEM; 1560 1561 if (evsel->name) 1562 evsel->use_config_name = true; 1563 1564 evsel->pmu_name = name ? strdup(name) : NULL; 1565 evsel->percore = config_term_percore(&evsel->config_terms); 1566 1567 if (parse_state->fake_pmu) 1568 return 0; 1569 1570 free((char *)evsel->unit); 1571 evsel->unit = strdup(info.unit); 1572 evsel->scale = info.scale; 1573 evsel->per_pkg = info.per_pkg; 1574 evsel->snapshot = info.snapshot; 1575 return 0; 1576 } 1577 1578 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1579 char *str, struct list_head *head, 1580 struct list_head **listp) 1581 { 1582 struct parse_events_term *term; 1583 struct list_head *list = NULL; 1584 struct list_head *orig_head = NULL; 1585 struct perf_pmu *pmu = NULL; 1586 int ok = 0; 1587 char *config; 1588 1589 *listp = NULL; 1590 1591 if (!head) { 1592 head = malloc(sizeof(struct list_head)); 1593 if (!head) 1594 goto out_err; 1595 1596 INIT_LIST_HEAD(head); 1597 } 1598 config = strdup(str); 1599 if (!config) 1600 goto out_err; 1601 1602 if (parse_events_term__num(&term, 1603 PARSE_EVENTS__TERM_TYPE_USER, 1604 config, 1, false, NULL, 1605 NULL) < 0) { 1606 free(config); 1607 goto out_err; 1608 } 1609 list_add_tail(&term->list, head); 1610 1611 /* Add it for all PMUs that support the alias */ 1612 list = malloc(sizeof(struct list_head)); 1613 if (!list) 1614 goto out_err; 1615 1616 INIT_LIST_HEAD(list); 1617 1618 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1619 struct perf_pmu_alias *alias; 1620 1621 list_for_each_entry(alias, &pmu->aliases, list) { 1622 if (!strcasecmp(alias->name, str)) { 1623 parse_events_copy_term_list(head, &orig_head); 1624 if (!parse_events_add_pmu(parse_state, list, 1625 pmu->name, orig_head, 1626 /*auto_merge_stats=*/true)) { 1627 pr_debug("%s -> %s/%s/\n", str, 1628 pmu->name, alias->str); 1629 ok++; 1630 } 1631 parse_events_terms__delete(orig_head); 1632 } 1633 } 1634 } 1635 1636 if (parse_state->fake_pmu) { 1637 if (!parse_events_add_pmu(parse_state, list, str, head, 1638 /*auto_merge_stats=*/true)) { 1639 pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str); 1640 ok++; 1641 } 1642 } 1643 1644 out_err: 1645 if (ok) 1646 *listp = list; 1647 else 1648 free(list); 1649 1650 parse_events_terms__delete(head); 1651 return ok ? 0 : -1; 1652 } 1653 1654 int parse_events__modifier_group(struct list_head *list, 1655 char *event_mod) 1656 { 1657 return parse_events__modifier_event(list, event_mod, true); 1658 } 1659 1660 void parse_events__set_leader(char *name, struct list_head *list) 1661 { 1662 struct evsel *leader; 1663 1664 if (list_empty(list)) { 1665 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1666 return; 1667 } 1668 1669 leader = list_first_entry(list, struct evsel, core.node); 1670 __perf_evlist__set_leader(list, &leader->core); 1671 leader->group_name = name; 1672 } 1673 1674 /* list_event is assumed to point to malloc'ed memory */ 1675 void parse_events_update_lists(struct list_head *list_event, 1676 struct list_head *list_all) 1677 { 1678 /* 1679 * Called for single event definition. Update the 1680 * 'all event' list, and reinit the 'single event' 1681 * list, for next event definition. 1682 */ 1683 list_splice_tail(list_event, list_all); 1684 free(list_event); 1685 } 1686 1687 struct event_modifier { 1688 int eu; 1689 int ek; 1690 int eh; 1691 int eH; 1692 int eG; 1693 int eI; 1694 int precise; 1695 int precise_max; 1696 int exclude_GH; 1697 int sample_read; 1698 int pinned; 1699 int weak; 1700 int exclusive; 1701 int bpf_counter; 1702 }; 1703 1704 static int get_event_modifier(struct event_modifier *mod, char *str, 1705 struct evsel *evsel) 1706 { 1707 int eu = evsel ? evsel->core.attr.exclude_user : 0; 1708 int ek = evsel ? evsel->core.attr.exclude_kernel : 0; 1709 int eh = evsel ? evsel->core.attr.exclude_hv : 0; 1710 int eH = evsel ? evsel->core.attr.exclude_host : 0; 1711 int eG = evsel ? evsel->core.attr.exclude_guest : 0; 1712 int eI = evsel ? evsel->core.attr.exclude_idle : 0; 1713 int precise = evsel ? evsel->core.attr.precise_ip : 0; 1714 int precise_max = 0; 1715 int sample_read = 0; 1716 int pinned = evsel ? evsel->core.attr.pinned : 0; 1717 int exclusive = evsel ? evsel->core.attr.exclusive : 0; 1718 1719 int exclude = eu | ek | eh; 1720 int exclude_GH = evsel ? evsel->exclude_GH : 0; 1721 int weak = 0; 1722 int bpf_counter = 0; 1723 1724 memset(mod, 0, sizeof(*mod)); 1725 1726 while (*str) { 1727 if (*str == 'u') { 1728 if (!exclude) 1729 exclude = eu = ek = eh = 1; 1730 if (!exclude_GH && !perf_guest) 1731 eG = 1; 1732 eu = 0; 1733 } else if (*str == 'k') { 1734 if (!exclude) 1735 exclude = eu = ek = eh = 1; 1736 ek = 0; 1737 } else if (*str == 'h') { 1738 if (!exclude) 1739 exclude = eu = ek = eh = 1; 1740 eh = 0; 1741 } else if (*str == 'G') { 1742 if (!exclude_GH) 1743 exclude_GH = eG = eH = 1; 1744 eG = 0; 1745 } else if (*str == 'H') { 1746 if (!exclude_GH) 1747 exclude_GH = eG = eH = 1; 1748 eH = 0; 1749 } else if (*str == 'I') { 1750 eI = 1; 1751 } else if (*str == 'p') { 1752 precise++; 1753 /* use of precise requires exclude_guest */ 1754 if (!exclude_GH) 1755 eG = 1; 1756 } else if (*str == 'P') { 1757 precise_max = 1; 1758 } else if (*str == 'S') { 1759 sample_read = 1; 1760 } else if (*str == 'D') { 1761 pinned = 1; 1762 } else if (*str == 'e') { 1763 exclusive = 1; 1764 } else if (*str == 'W') { 1765 weak = 1; 1766 } else if (*str == 'b') { 1767 bpf_counter = 1; 1768 } else 1769 break; 1770 1771 ++str; 1772 } 1773 1774 /* 1775 * precise ip: 1776 * 1777 * 0 - SAMPLE_IP can have arbitrary skid 1778 * 1 - SAMPLE_IP must have constant skid 1779 * 2 - SAMPLE_IP requested to have 0 skid 1780 * 3 - SAMPLE_IP must have 0 skid 1781 * 1782 * See also PERF_RECORD_MISC_EXACT_IP 1783 */ 1784 if (precise > 3) 1785 return -EINVAL; 1786 1787 mod->eu = eu; 1788 mod->ek = ek; 1789 mod->eh = eh; 1790 mod->eH = eH; 1791 mod->eG = eG; 1792 mod->eI = eI; 1793 mod->precise = precise; 1794 mod->precise_max = precise_max; 1795 mod->exclude_GH = exclude_GH; 1796 mod->sample_read = sample_read; 1797 mod->pinned = pinned; 1798 mod->weak = weak; 1799 mod->bpf_counter = bpf_counter; 1800 mod->exclusive = exclusive; 1801 1802 return 0; 1803 } 1804 1805 /* 1806 * Basic modifier sanity check to validate it contains only one 1807 * instance of any modifier (apart from 'p') present. 1808 */ 1809 static int check_modifier(char *str) 1810 { 1811 char *p = str; 1812 1813 /* The sizeof includes 0 byte as well. */ 1814 if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1)) 1815 return -1; 1816 1817 while (*p) { 1818 if (*p != 'p' && strchr(p + 1, *p)) 1819 return -1; 1820 p++; 1821 } 1822 1823 return 0; 1824 } 1825 1826 int parse_events__modifier_event(struct list_head *list, char *str, bool add) 1827 { 1828 struct evsel *evsel; 1829 struct event_modifier mod; 1830 1831 if (str == NULL) 1832 return 0; 1833 1834 if (check_modifier(str)) 1835 return -EINVAL; 1836 1837 if (!add && get_event_modifier(&mod, str, NULL)) 1838 return -EINVAL; 1839 1840 __evlist__for_each_entry(list, evsel) { 1841 if (add && get_event_modifier(&mod, str, evsel)) 1842 return -EINVAL; 1843 1844 evsel->core.attr.exclude_user = mod.eu; 1845 evsel->core.attr.exclude_kernel = mod.ek; 1846 evsel->core.attr.exclude_hv = mod.eh; 1847 evsel->core.attr.precise_ip = mod.precise; 1848 evsel->core.attr.exclude_host = mod.eH; 1849 evsel->core.attr.exclude_guest = mod.eG; 1850 evsel->core.attr.exclude_idle = mod.eI; 1851 evsel->exclude_GH = mod.exclude_GH; 1852 evsel->sample_read = mod.sample_read; 1853 evsel->precise_max = mod.precise_max; 1854 evsel->weak_group = mod.weak; 1855 evsel->bpf_counter = mod.bpf_counter; 1856 1857 if (evsel__is_group_leader(evsel)) { 1858 evsel->core.attr.pinned = mod.pinned; 1859 evsel->core.attr.exclusive = mod.exclusive; 1860 } 1861 } 1862 1863 return 0; 1864 } 1865 1866 int parse_events_name(struct list_head *list, const char *name) 1867 { 1868 struct evsel *evsel; 1869 1870 __evlist__for_each_entry(list, evsel) { 1871 if (!evsel->name) 1872 evsel->name = strdup(name); 1873 } 1874 1875 return 0; 1876 } 1877 1878 static int 1879 comp_pmu(const void *p1, const void *p2) 1880 { 1881 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; 1882 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; 1883 1884 return strcasecmp(pmu1->symbol, pmu2->symbol); 1885 } 1886 1887 static void perf_pmu__parse_cleanup(void) 1888 { 1889 if (perf_pmu_events_list_num > 0) { 1890 struct perf_pmu_event_symbol *p; 1891 int i; 1892 1893 for (i = 0; i < perf_pmu_events_list_num; i++) { 1894 p = perf_pmu_events_list + i; 1895 zfree(&p->symbol); 1896 } 1897 zfree(&perf_pmu_events_list); 1898 perf_pmu_events_list_num = 0; 1899 } 1900 } 1901 1902 #define SET_SYMBOL(str, stype) \ 1903 do { \ 1904 p->symbol = str; \ 1905 if (!p->symbol) \ 1906 goto err; \ 1907 p->type = stype; \ 1908 } while (0) 1909 1910 /* 1911 * Read the pmu events list from sysfs 1912 * Save it into perf_pmu_events_list 1913 */ 1914 static void perf_pmu__parse_init(void) 1915 { 1916 1917 struct perf_pmu *pmu = NULL; 1918 struct perf_pmu_alias *alias; 1919 int len = 0; 1920 1921 pmu = NULL; 1922 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1923 list_for_each_entry(alias, &pmu->aliases, list) { 1924 char *tmp = strchr(alias->name, '-'); 1925 1926 if (tmp) { 1927 char *tmp2 = NULL; 1928 1929 tmp2 = strchr(tmp + 1, '-'); 1930 len++; 1931 if (tmp2) 1932 len++; 1933 } 1934 1935 len++; 1936 } 1937 } 1938 1939 if (len == 0) { 1940 perf_pmu_events_list_num = -1; 1941 return; 1942 } 1943 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); 1944 if (!perf_pmu_events_list) 1945 return; 1946 perf_pmu_events_list_num = len; 1947 1948 len = 0; 1949 pmu = NULL; 1950 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1951 list_for_each_entry(alias, &pmu->aliases, list) { 1952 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; 1953 char *tmp = strchr(alias->name, '-'); 1954 char *tmp2 = NULL; 1955 1956 if (tmp) 1957 tmp2 = strchr(tmp + 1, '-'); 1958 if (tmp2) { 1959 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 1960 PMU_EVENT_SYMBOL_PREFIX); 1961 p++; 1962 tmp++; 1963 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX); 1964 p++; 1965 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2); 1966 len += 3; 1967 } else if (tmp) { 1968 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 1969 PMU_EVENT_SYMBOL_PREFIX); 1970 p++; 1971 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); 1972 len += 2; 1973 } else { 1974 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); 1975 len++; 1976 } 1977 } 1978 } 1979 qsort(perf_pmu_events_list, len, 1980 sizeof(struct perf_pmu_event_symbol), comp_pmu); 1981 1982 return; 1983 err: 1984 perf_pmu__parse_cleanup(); 1985 } 1986 1987 /* 1988 * This function injects special term in 1989 * perf_pmu_events_list so the test code 1990 * can check on this functionality. 1991 */ 1992 int perf_pmu__test_parse_init(void) 1993 { 1994 struct perf_pmu_event_symbol *list, *tmp, symbols[] = { 1995 {(char *)"read", PMU_EVENT_SYMBOL}, 1996 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX}, 1997 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX}, 1998 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX}, 1999 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2}, 2000 }; 2001 unsigned long i, j; 2002 2003 tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols)); 2004 if (!list) 2005 return -ENOMEM; 2006 2007 for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) { 2008 tmp->type = symbols[i].type; 2009 tmp->symbol = strdup(symbols[i].symbol); 2010 if (!tmp->symbol) 2011 goto err_free; 2012 } 2013 2014 perf_pmu_events_list = list; 2015 perf_pmu_events_list_num = ARRAY_SIZE(symbols); 2016 2017 qsort(perf_pmu_events_list, ARRAY_SIZE(symbols), 2018 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2019 return 0; 2020 2021 err_free: 2022 for (j = 0, tmp = list; j < i; j++, tmp++) 2023 zfree(&tmp->symbol); 2024 free(list); 2025 return -ENOMEM; 2026 } 2027 2028 enum perf_pmu_event_symbol_type 2029 perf_pmu__parse_check(const char *name) 2030 { 2031 struct perf_pmu_event_symbol p, *r; 2032 2033 /* scan kernel pmu events from sysfs if needed */ 2034 if (perf_pmu_events_list_num == 0) 2035 perf_pmu__parse_init(); 2036 /* 2037 * name "cpu" could be prefix of cpu-cycles or cpu// events. 2038 * cpu-cycles has been handled by hardcode. 2039 * So it must be cpu// events, not kernel pmu event. 2040 */ 2041 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) 2042 return PMU_EVENT_SYMBOL_ERR; 2043 2044 p.symbol = strdup(name); 2045 r = bsearch(&p, perf_pmu_events_list, 2046 (size_t) perf_pmu_events_list_num, 2047 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2048 zfree(&p.symbol); 2049 return r ? r->type : PMU_EVENT_SYMBOL_ERR; 2050 } 2051 2052 static int parse_events__scanner(const char *str, 2053 struct parse_events_state *parse_state) 2054 { 2055 YY_BUFFER_STATE buffer; 2056 void *scanner; 2057 int ret; 2058 2059 ret = parse_events_lex_init_extra(parse_state, &scanner); 2060 if (ret) 2061 return ret; 2062 2063 buffer = parse_events__scan_string(str, scanner); 2064 2065 #ifdef PARSER_DEBUG 2066 parse_events_debug = 1; 2067 parse_events_set_debug(1, scanner); 2068 #endif 2069 ret = parse_events_parse(parse_state, scanner); 2070 2071 parse_events__flush_buffer(buffer, scanner); 2072 parse_events__delete_buffer(buffer, scanner); 2073 parse_events_lex_destroy(scanner); 2074 return ret; 2075 } 2076 2077 /* 2078 * parse event config string, return a list of event terms. 2079 */ 2080 int parse_events_terms(struct list_head *terms, const char *str) 2081 { 2082 struct parse_events_state parse_state = { 2083 .terms = NULL, 2084 .stoken = PE_START_TERMS, 2085 }; 2086 int ret; 2087 2088 ret = parse_events__scanner(str, &parse_state); 2089 perf_pmu__parse_cleanup(); 2090 2091 if (!ret) { 2092 list_splice(parse_state.terms, terms); 2093 zfree(&parse_state.terms); 2094 return 0; 2095 } 2096 2097 parse_events_terms__delete(parse_state.terms); 2098 return ret; 2099 } 2100 2101 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 2102 const char *str, char *pmu_name, 2103 struct list_head *list) 2104 { 2105 struct parse_events_state ps = { 2106 .list = LIST_HEAD_INIT(ps.list), 2107 .stoken = PE_START_EVENTS, 2108 .hybrid_pmu_name = pmu_name, 2109 .idx = parse_state->idx, 2110 }; 2111 int ret; 2112 2113 ret = parse_events__scanner(str, &ps); 2114 perf_pmu__parse_cleanup(); 2115 2116 if (!ret) { 2117 if (!list_empty(&ps.list)) { 2118 list_splice(&ps.list, list); 2119 parse_state->idx = ps.idx; 2120 return 0; 2121 } else 2122 return -1; 2123 } 2124 2125 return ret; 2126 } 2127 2128 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) 2129 { 2130 /* Order by insertion index. */ 2131 return lhs->core.idx - rhs->core.idx; 2132 } 2133 2134 static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r) 2135 { 2136 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node); 2137 const struct evsel *lhs = container_of(lhs_core, struct evsel, core); 2138 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node); 2139 const struct evsel *rhs = container_of(rhs_core, struct evsel, core); 2140 int *leader_idx = state; 2141 int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret; 2142 const char *lhs_pmu_name, *rhs_pmu_name; 2143 bool lhs_has_group = false, rhs_has_group = false; 2144 2145 /* 2146 * First sort by grouping/leader. Read the leader idx only if the evsel 2147 * is part of a group, as -1 indicates no group. 2148 */ 2149 if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) { 2150 lhs_has_group = true; 2151 lhs_leader_idx = lhs_core->leader->idx; 2152 } 2153 if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) { 2154 rhs_has_group = true; 2155 rhs_leader_idx = rhs_core->leader->idx; 2156 } 2157 2158 if (lhs_leader_idx != rhs_leader_idx) 2159 return lhs_leader_idx - rhs_leader_idx; 2160 2161 /* Group by PMU if there is a group. Groups can't span PMUs. */ 2162 if (lhs_has_group && rhs_has_group) { 2163 lhs_pmu_name = evsel__group_pmu_name(lhs); 2164 rhs_pmu_name = evsel__group_pmu_name(rhs); 2165 ret = strcmp(lhs_pmu_name, rhs_pmu_name); 2166 if (ret) 2167 return ret; 2168 } 2169 2170 /* Architecture specific sorting. */ 2171 return arch_evlist__cmp(lhs, rhs); 2172 } 2173 2174 static bool parse_events__sort_events_and_fix_groups(struct list_head *list) 2175 { 2176 int idx = 0, unsorted_idx = -1; 2177 struct evsel *pos, *cur_leader = NULL; 2178 struct perf_evsel *cur_leaders_grp = NULL; 2179 bool idx_changed = false; 2180 int orig_num_leaders = 0, num_leaders = 0; 2181 2182 /* 2183 * Compute index to insert ungrouped events at. Place them where the 2184 * first ungrouped event appears. 2185 */ 2186 list_for_each_entry(pos, list, core.node) { 2187 const struct evsel *pos_leader = evsel__leader(pos); 2188 2189 if (pos == pos_leader) 2190 orig_num_leaders++; 2191 2192 /* 2193 * Ensure indexes are sequential, in particular for multiple 2194 * event lists being merged. The indexes are used to detect when 2195 * the user order is modified. 2196 */ 2197 pos->core.idx = idx++; 2198 2199 if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2) 2200 unsorted_idx = pos->core.idx; 2201 } 2202 2203 /* Sort events. */ 2204 list_sort(&unsorted_idx, list, evlist__cmp); 2205 2206 /* 2207 * Recompute groups, splitting for PMUs and adding groups for events 2208 * that require them. 2209 */ 2210 idx = 0; 2211 list_for_each_entry(pos, list, core.node) { 2212 const struct evsel *pos_leader = evsel__leader(pos); 2213 const char *pos_pmu_name = evsel__group_pmu_name(pos); 2214 const char *cur_leader_pmu_name, *pos_leader_pmu_name; 2215 bool force_grouped = arch_evsel__must_be_in_group(pos); 2216 2217 /* Reset index and nr_members. */ 2218 if (pos->core.idx != idx) 2219 idx_changed = true; 2220 pos->core.idx = idx++; 2221 pos->core.nr_members = 0; 2222 2223 /* 2224 * Set the group leader respecting the given groupings and that 2225 * groups can't span PMUs. 2226 */ 2227 if (!cur_leader) 2228 cur_leader = pos; 2229 2230 cur_leader_pmu_name = evsel__group_pmu_name(cur_leader); 2231 if ((cur_leaders_grp != pos->core.leader && !force_grouped) || 2232 strcmp(cur_leader_pmu_name, pos_pmu_name)) { 2233 /* Event is for a different group/PMU than last. */ 2234 cur_leader = pos; 2235 /* 2236 * Remember the leader's group before it is overwritten, 2237 * so that later events match as being in the same 2238 * group. 2239 */ 2240 cur_leaders_grp = pos->core.leader; 2241 } 2242 pos_leader_pmu_name = evsel__group_pmu_name(pos_leader); 2243 if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) { 2244 /* 2245 * Event's PMU differs from its leader's. Groups can't 2246 * span PMUs, so update leader from the group/PMU 2247 * tracker. 2248 */ 2249 evsel__set_leader(pos, cur_leader); 2250 } 2251 } 2252 list_for_each_entry(pos, list, core.node) { 2253 struct evsel *pos_leader = evsel__leader(pos); 2254 2255 if (pos == pos_leader) 2256 num_leaders++; 2257 pos_leader->core.nr_members++; 2258 } 2259 return idx_changed || num_leaders != orig_num_leaders; 2260 } 2261 2262 int __parse_events(struct evlist *evlist, const char *str, 2263 struct parse_events_error *err, struct perf_pmu *fake_pmu, 2264 bool warn_if_reordered) 2265 { 2266 struct parse_events_state parse_state = { 2267 .list = LIST_HEAD_INIT(parse_state.list), 2268 .idx = evlist->core.nr_entries, 2269 .error = err, 2270 .evlist = evlist, 2271 .stoken = PE_START_EVENTS, 2272 .fake_pmu = fake_pmu, 2273 }; 2274 int ret; 2275 2276 ret = parse_events__scanner(str, &parse_state); 2277 perf_pmu__parse_cleanup(); 2278 2279 if (!ret && list_empty(&parse_state.list)) { 2280 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2281 return -1; 2282 } 2283 2284 if (parse_events__sort_events_and_fix_groups(&parse_state.list) && 2285 warn_if_reordered && !parse_state.wild_card_pmus) 2286 pr_warning("WARNING: events were regrouped to match PMUs\n"); 2287 2288 /* 2289 * Add list to the evlist even with errors to allow callers to clean up. 2290 */ 2291 evlist__splice_list_tail(evlist, &parse_state.list); 2292 2293 if (!ret) { 2294 struct evsel *last; 2295 2296 last = evlist__last(evlist); 2297 last->cmdline_group_boundary = true; 2298 2299 return 0; 2300 } 2301 2302 /* 2303 * There are 2 users - builtin-record and builtin-test objects. 2304 * Both call evlist__delete in case of error, so we dont 2305 * need to bother. 2306 */ 2307 return ret; 2308 } 2309 2310 int parse_event(struct evlist *evlist, const char *str) 2311 { 2312 struct parse_events_error err; 2313 int ret; 2314 2315 parse_events_error__init(&err); 2316 ret = parse_events(evlist, str, &err); 2317 parse_events_error__exit(&err); 2318 return ret; 2319 } 2320 2321 void parse_events_error__init(struct parse_events_error *err) 2322 { 2323 bzero(err, sizeof(*err)); 2324 } 2325 2326 void parse_events_error__exit(struct parse_events_error *err) 2327 { 2328 zfree(&err->str); 2329 zfree(&err->help); 2330 zfree(&err->first_str); 2331 zfree(&err->first_help); 2332 } 2333 2334 void parse_events_error__handle(struct parse_events_error *err, int idx, 2335 char *str, char *help) 2336 { 2337 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2338 goto out_free; 2339 switch (err->num_errors) { 2340 case 0: 2341 err->idx = idx; 2342 err->str = str; 2343 err->help = help; 2344 break; 2345 case 1: 2346 err->first_idx = err->idx; 2347 err->idx = idx; 2348 err->first_str = err->str; 2349 err->str = str; 2350 err->first_help = err->help; 2351 err->help = help; 2352 break; 2353 default: 2354 pr_debug("Multiple errors dropping message: %s (%s)\n", 2355 err->str, err->help); 2356 free(err->str); 2357 err->str = str; 2358 free(err->help); 2359 err->help = help; 2360 break; 2361 } 2362 err->num_errors++; 2363 return; 2364 2365 out_free: 2366 free(str); 2367 free(help); 2368 } 2369 2370 #define MAX_WIDTH 1000 2371 static int get_term_width(void) 2372 { 2373 struct winsize ws; 2374 2375 get_term_dimensions(&ws); 2376 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2377 } 2378 2379 static void __parse_events_error__print(int err_idx, const char *err_str, 2380 const char *err_help, const char *event) 2381 { 2382 const char *str = "invalid or unsupported event: "; 2383 char _buf[MAX_WIDTH]; 2384 char *buf = (char *) event; 2385 int idx = 0; 2386 if (err_str) { 2387 /* -2 for extra '' in the final fprintf */ 2388 int width = get_term_width() - 2; 2389 int len_event = strlen(event); 2390 int len_str, max_len, cut = 0; 2391 2392 /* 2393 * Maximum error index indent, we will cut 2394 * the event string if it's bigger. 2395 */ 2396 int max_err_idx = 13; 2397 2398 /* 2399 * Let's be specific with the message when 2400 * we have the precise error. 2401 */ 2402 str = "event syntax error: "; 2403 len_str = strlen(str); 2404 max_len = width - len_str; 2405 2406 buf = _buf; 2407 2408 /* We're cutting from the beginning. */ 2409 if (err_idx > max_err_idx) 2410 cut = err_idx - max_err_idx; 2411 2412 strncpy(buf, event + cut, max_len); 2413 2414 /* Mark cut parts with '..' on both sides. */ 2415 if (cut) 2416 buf[0] = buf[1] = '.'; 2417 2418 if ((len_event - cut) > max_len) { 2419 buf[max_len - 1] = buf[max_len - 2] = '.'; 2420 buf[max_len] = 0; 2421 } 2422 2423 idx = len_str + err_idx - cut; 2424 } 2425 2426 fprintf(stderr, "%s'%s'\n", str, buf); 2427 if (idx) { 2428 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2429 if (err_help) 2430 fprintf(stderr, "\n%s\n", err_help); 2431 } 2432 } 2433 2434 void parse_events_error__print(struct parse_events_error *err, 2435 const char *event) 2436 { 2437 if (!err->num_errors) 2438 return; 2439 2440 __parse_events_error__print(err->idx, err->str, err->help, event); 2441 2442 if (err->num_errors > 1) { 2443 fputs("\nInitial error:\n", stderr); 2444 __parse_events_error__print(err->first_idx, err->first_str, 2445 err->first_help, event); 2446 } 2447 } 2448 2449 #undef MAX_WIDTH 2450 2451 int parse_events_option(const struct option *opt, const char *str, 2452 int unset __maybe_unused) 2453 { 2454 struct evlist *evlist = *(struct evlist **)opt->value; 2455 struct parse_events_error err; 2456 int ret; 2457 2458 parse_events_error__init(&err); 2459 ret = parse_events(evlist, str, &err); 2460 2461 if (ret) { 2462 parse_events_error__print(&err, str); 2463 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2464 } 2465 parse_events_error__exit(&err); 2466 2467 return ret; 2468 } 2469 2470 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2471 { 2472 struct evlist **evlistp = opt->value; 2473 int ret; 2474 2475 if (*evlistp == NULL) { 2476 *evlistp = evlist__new(); 2477 2478 if (*evlistp == NULL) { 2479 fprintf(stderr, "Not enough memory to create evlist\n"); 2480 return -1; 2481 } 2482 } 2483 2484 ret = parse_events_option(opt, str, unset); 2485 if (ret) { 2486 evlist__delete(*evlistp); 2487 *evlistp = NULL; 2488 } 2489 2490 return ret; 2491 } 2492 2493 static int 2494 foreach_evsel_in_last_glob(struct evlist *evlist, 2495 int (*func)(struct evsel *evsel, 2496 const void *arg), 2497 const void *arg) 2498 { 2499 struct evsel *last = NULL; 2500 int err; 2501 2502 /* 2503 * Don't return when list_empty, give func a chance to report 2504 * error when it found last == NULL. 2505 * 2506 * So no need to WARN here, let *func do this. 2507 */ 2508 if (evlist->core.nr_entries > 0) 2509 last = evlist__last(evlist); 2510 2511 do { 2512 err = (*func)(last, arg); 2513 if (err) 2514 return -1; 2515 if (!last) 2516 return 0; 2517 2518 if (last->core.node.prev == &evlist->core.entries) 2519 return 0; 2520 last = list_entry(last->core.node.prev, struct evsel, core.node); 2521 } while (!last->cmdline_group_boundary); 2522 2523 return 0; 2524 } 2525 2526 static int set_filter(struct evsel *evsel, const void *arg) 2527 { 2528 const char *str = arg; 2529 bool found = false; 2530 int nr_addr_filters = 0; 2531 struct perf_pmu *pmu = NULL; 2532 2533 if (evsel == NULL) { 2534 fprintf(stderr, 2535 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2536 return -1; 2537 } 2538 2539 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2540 if (evsel__append_tp_filter(evsel, str) < 0) { 2541 fprintf(stderr, 2542 "not enough memory to hold filter string\n"); 2543 return -1; 2544 } 2545 2546 return 0; 2547 } 2548 2549 while ((pmu = perf_pmu__scan(pmu)) != NULL) 2550 if (pmu->type == evsel->core.attr.type) { 2551 found = true; 2552 break; 2553 } 2554 2555 if (found) 2556 perf_pmu__scan_file(pmu, "nr_addr_filters", 2557 "%d", &nr_addr_filters); 2558 2559 if (!nr_addr_filters) 2560 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2561 2562 if (evsel__append_addr_filter(evsel, str) < 0) { 2563 fprintf(stderr, 2564 "not enough memory to hold filter string\n"); 2565 return -1; 2566 } 2567 2568 return 0; 2569 } 2570 2571 int parse_filter(const struct option *opt, const char *str, 2572 int unset __maybe_unused) 2573 { 2574 struct evlist *evlist = *(struct evlist **)opt->value; 2575 2576 return foreach_evsel_in_last_glob(evlist, set_filter, 2577 (const void *)str); 2578 } 2579 2580 static int add_exclude_perf_filter(struct evsel *evsel, 2581 const void *arg __maybe_unused) 2582 { 2583 char new_filter[64]; 2584 2585 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2586 fprintf(stderr, 2587 "--exclude-perf option should follow a -e tracepoint option\n"); 2588 return -1; 2589 } 2590 2591 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2592 2593 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2594 fprintf(stderr, 2595 "not enough memory to hold filter string\n"); 2596 return -1; 2597 } 2598 2599 return 0; 2600 } 2601 2602 int exclude_perf(const struct option *opt, 2603 const char *arg __maybe_unused, 2604 int unset __maybe_unused) 2605 { 2606 struct evlist *evlist = *(struct evlist **)opt->value; 2607 2608 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2609 NULL); 2610 } 2611 2612 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2613 { 2614 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2615 } 2616 2617 static int new_term(struct parse_events_term **_term, 2618 struct parse_events_term *temp, 2619 char *str, u64 num) 2620 { 2621 struct parse_events_term *term; 2622 2623 term = malloc(sizeof(*term)); 2624 if (!term) 2625 return -ENOMEM; 2626 2627 *term = *temp; 2628 INIT_LIST_HEAD(&term->list); 2629 term->weak = false; 2630 2631 switch (term->type_val) { 2632 case PARSE_EVENTS__TERM_TYPE_NUM: 2633 term->val.num = num; 2634 break; 2635 case PARSE_EVENTS__TERM_TYPE_STR: 2636 term->val.str = str; 2637 break; 2638 default: 2639 free(term); 2640 return -EINVAL; 2641 } 2642 2643 *_term = term; 2644 return 0; 2645 } 2646 2647 int parse_events_term__num(struct parse_events_term **term, 2648 int type_term, char *config, u64 num, 2649 bool no_value, 2650 void *loc_term_, void *loc_val_) 2651 { 2652 YYLTYPE *loc_term = loc_term_; 2653 YYLTYPE *loc_val = loc_val_; 2654 2655 struct parse_events_term temp = { 2656 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2657 .type_term = type_term, 2658 .config = config ? : strdup(config_term_names[type_term]), 2659 .no_value = no_value, 2660 .err_term = loc_term ? loc_term->first_column : 0, 2661 .err_val = loc_val ? loc_val->first_column : 0, 2662 }; 2663 2664 return new_term(term, &temp, NULL, num); 2665 } 2666 2667 int parse_events_term__str(struct parse_events_term **term, 2668 int type_term, char *config, char *str, 2669 void *loc_term_, void *loc_val_) 2670 { 2671 YYLTYPE *loc_term = loc_term_; 2672 YYLTYPE *loc_val = loc_val_; 2673 2674 struct parse_events_term temp = { 2675 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2676 .type_term = type_term, 2677 .config = config, 2678 .err_term = loc_term ? loc_term->first_column : 0, 2679 .err_val = loc_val ? loc_val->first_column : 0, 2680 }; 2681 2682 return new_term(term, &temp, str, 0); 2683 } 2684 2685 int parse_events_term__sym_hw(struct parse_events_term **term, 2686 char *config, unsigned idx) 2687 { 2688 struct event_symbol *sym; 2689 char *str; 2690 struct parse_events_term temp = { 2691 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2692 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 2693 .config = config, 2694 }; 2695 2696 if (!temp.config) { 2697 temp.config = strdup("event"); 2698 if (!temp.config) 2699 return -ENOMEM; 2700 } 2701 BUG_ON(idx >= PERF_COUNT_HW_MAX); 2702 sym = &event_symbols_hw[idx]; 2703 2704 str = strdup(sym->symbol); 2705 if (!str) 2706 return -ENOMEM; 2707 return new_term(term, &temp, str, 0); 2708 } 2709 2710 int parse_events_term__clone(struct parse_events_term **new, 2711 struct parse_events_term *term) 2712 { 2713 char *str; 2714 struct parse_events_term temp = { 2715 .type_val = term->type_val, 2716 .type_term = term->type_term, 2717 .config = NULL, 2718 .err_term = term->err_term, 2719 .err_val = term->err_val, 2720 }; 2721 2722 if (term->config) { 2723 temp.config = strdup(term->config); 2724 if (!temp.config) 2725 return -ENOMEM; 2726 } 2727 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2728 return new_term(new, &temp, NULL, term->val.num); 2729 2730 str = strdup(term->val.str); 2731 if (!str) 2732 return -ENOMEM; 2733 return new_term(new, &temp, str, 0); 2734 } 2735 2736 void parse_events_term__delete(struct parse_events_term *term) 2737 { 2738 if (term->array.nr_ranges) 2739 zfree(&term->array.ranges); 2740 2741 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2742 zfree(&term->val.str); 2743 2744 zfree(&term->config); 2745 free(term); 2746 } 2747 2748 int parse_events_copy_term_list(struct list_head *old, 2749 struct list_head **new) 2750 { 2751 struct parse_events_term *term, *n; 2752 int ret; 2753 2754 if (!old) { 2755 *new = NULL; 2756 return 0; 2757 } 2758 2759 *new = malloc(sizeof(struct list_head)); 2760 if (!*new) 2761 return -ENOMEM; 2762 INIT_LIST_HEAD(*new); 2763 2764 list_for_each_entry (term, old, list) { 2765 ret = parse_events_term__clone(&n, term); 2766 if (ret) 2767 return ret; 2768 list_add_tail(&n->list, *new); 2769 } 2770 return 0; 2771 } 2772 2773 void parse_events_terms__purge(struct list_head *terms) 2774 { 2775 struct parse_events_term *term, *h; 2776 2777 list_for_each_entry_safe(term, h, terms, list) { 2778 list_del_init(&term->list); 2779 parse_events_term__delete(term); 2780 } 2781 } 2782 2783 void parse_events_terms__delete(struct list_head *terms) 2784 { 2785 if (!terms) 2786 return; 2787 parse_events_terms__purge(terms); 2788 free(terms); 2789 } 2790 2791 void parse_events__clear_array(struct parse_events_array *a) 2792 { 2793 zfree(&a->ranges); 2794 } 2795 2796 void parse_events_evlist_error(struct parse_events_state *parse_state, 2797 int idx, const char *str) 2798 { 2799 if (!parse_state->error) 2800 return; 2801 2802 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL); 2803 } 2804 2805 static void config_terms_list(char *buf, size_t buf_sz) 2806 { 2807 int i; 2808 bool first = true; 2809 2810 buf[0] = '\0'; 2811 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2812 const char *name = config_term_names[i]; 2813 2814 if (!config_term_avail(i, NULL)) 2815 continue; 2816 if (!name) 2817 continue; 2818 if (name[0] == '<') 2819 continue; 2820 2821 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2822 return; 2823 2824 if (!first) 2825 strcat(buf, ","); 2826 else 2827 first = false; 2828 strcat(buf, name); 2829 } 2830 } 2831 2832 /* 2833 * Return string contains valid config terms of an event. 2834 * @additional_terms: For terms such as PMU sysfs terms. 2835 */ 2836 char *parse_events_formats_error_string(char *additional_terms) 2837 { 2838 char *str; 2839 /* "no-overwrite" is the longest name */ 2840 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2841 (sizeof("no-overwrite") - 1)]; 2842 2843 config_terms_list(static_terms, sizeof(static_terms)); 2844 /* valid terms */ 2845 if (additional_terms) { 2846 if (asprintf(&str, "valid terms: %s,%s", 2847 additional_terms, static_terms) < 0) 2848 goto fail; 2849 } else { 2850 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2851 goto fail; 2852 } 2853 return str; 2854 2855 fail: 2856 return NULL; 2857 } 2858 2859 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx, 2860 struct perf_event_attr *attr, 2861 const char *name, 2862 const char *metric_id, 2863 struct perf_pmu *pmu, 2864 struct list_head *config_terms) 2865 { 2866 return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id, 2867 pmu, config_terms, /*auto_merge_stats=*/false, 2868 /*cpu_list=*/NULL); 2869 } 2870