#include <linux/hw_breakpoint.h>
#include "util.h"
#include "../perf.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "symbol.h"
#include "cache.h"
#include "header.h"
#include "debugfs.h"
#include "parse-events-bison.h"
#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
#include "pmu.h"

#define MAX_NAME_LEN 100

struct event_symbol {
	const char *symbol;
	const char *alias;
};

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
int parse_events_parse(void *data, void *scanner);

static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
};

#define __PERF_EVENT_FIELD(config, name) \
	((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)

#define PERF_EVENT_RAW(config)		__PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config)	__PERF_EVENT_FIELD(config, CONFIG)
#define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)

#define for_each_subsystem(sys_dir, sys_dirent, sys_next)		\
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	\
	if (sys_dirent.d_type == DT_DIR &&				\
	    (strcmp(sys_dirent.d_name, ".")) &&				\
	    (strcmp(sys_dirent.d_name, "..")))

static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
		 sys_dir->d_name, evt_dir->d_name);
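	/* only usable tracepoint directories expose an 'id' file; probe it */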
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}

#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	\
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	\
	if (evt_dirent.d_type == DT_DIR &&				\
	    (strcmp(evt_dirent.d_name, ".")) &&				\
	    (strcmp(evt_dirent.d_name, "..")) &&			\
	    (!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 512

struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char id_buf[24];
	int fd;
	u64 id;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (debugfs_valid_mountpoint(tracing_events_path))
		return NULL;

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return NULL;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {

			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
				 evt_dirent.d_name);
			fd = open(evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				closedir(evt_dir);
				closedir(sys_dir);
				path = zalloc(sizeof(*path));
				if (!path)
					return NULL;
				path->system = malloc(MAX_EVENT_LENGTH);
				if (!path->system) {
					free(path);
					return NULL;
				}
				path->name = malloc(MAX_EVENT_LENGTH);
				if (!path->name) {
					free(path->system);
					free(path);
					return NULL;
				}
				strncpy(path->system, sys_dirent.d_name,
					MAX_EVENT_LENGTH);
				strncpy(path->name, evt_dirent.d_name,
					MAX_EVENT_LENGTH);
				return path;
			}
		}
		closedir(evt_dir);
	}

	closedir(sys_dir);
	return NULL;
}

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static int __add_event(struct list_head **_list, int *idx,
		       struct perf_event_attr *attr,
		       char *name, struct cpu_map *cpus)
{
	struct perf_evsel *evsel;
	struct list_head *list = *_list;

	if (!list) {
		list = malloc(sizeof(*list));
		if (!list)
			return -ENOMEM;
		INIT_LIST_HEAD(list);
	}

	event_attr_init(attr);

	evsel = perf_evsel__new(attr, (*idx)++);
	if (!evsel) {
		free(list);
		return -ENOMEM;
	}

	evsel->cpus = cpus;
	if (name)
		evsel->name = strdup(name);
	list_add_tail(&evsel->node, list);
	*_list = list;
	return 0;
}

static int add_event(struct list_head **_list, int *idx,
		     struct perf_event_attr *attr, char *name)
{
	return __add_event(_list, idx, attr, name, NULL);
}

static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(str, names[i][j], n))
				longest = n;
		}
		if (longest > 0)
			return i;
	}

	return -1;
}

int parse_events_add_cache(struct list_head **list, int *idx,
			   char *type, char *op_result1, char *op_result2)
{
	struct perf_event_attr attr;
	char name[MAX_NAME_LEN];
	int cache_type = -1, cache_op = -1, cache_result = -1;
	char *op_result[2] = { op_result1, op_result2 };
	int i, n;

	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	cache_type = parse_aliases(type, perf_evsel__hw_cache,
				   PERF_COUNT_HW_CACHE_MAX);
	if (cache_type == -1)
		return -EINVAL;

	n = snprintf(name, MAX_NAME_LEN, "%s", type);

	for (i = 0; (i < 2) && (op_result[i]); i++) {
		char *str = op_result[i];

		n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);

		if (cache_op == -1) {
			cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	memset(&attr, 0, sizeof(attr));
	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr.type = PERF_TYPE_HW_CACHE;
	return add_event(list, idx, &attr, name);
}

static int add_tracepoint(struct list_head **listp, int *idx,
			  char *sys_name, char *evt_name)
{
	struct perf_evsel *evsel;
	struct list_head *list = *listp;

	if (!list) {
		list = malloc(sizeof(*list));
		if (!list)
			return -ENOMEM;
		INIT_LIST_HEAD(list);
	}

	evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
	if (!evsel) {
		free(list);
		return -ENOMEM;
	}

	list_add_tail(&evsel->node, list);
	*listp = list;
	return 0;
}

static int add_tracepoint_multi(struct list_head **list, int *idx,
				char *sys_name, char *evt_name)
{
	char evt_path[MAXPATHLEN];
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0;

	snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		perror("Can't open event dir");
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
	}

	closedir(evt_dir);
	return ret;
}

int parse_events_add_tracepoint(struct list_head **list, int *idx,
				char *sys, char *event)
{
	int ret;

	ret = debugfs_valid_mountpoint(tracing_events_path);
	if (ret)
		return ret;

	return strpbrk(event, "*?") ?
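	       /* a glob pattern adds every matching event in the subsystem */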
	       add_tracepoint_multi(list, idx, sys, event) :
	       add_tracepoint(list, idx, sys, event);
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct list_head **list, int *idx,
				void *ptr, char *type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = (unsigned long) ptr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/*
	 * We should find a nice way to override the access length
	 * Provide some defaults for now
	 */
	if (attr.bp_type == HW_BREAKPOINT_X)
		attr.bp_len = sizeof(long);
	else
		attr.bp_len = HW_BREAKPOINT_LEN_4;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	return add_event(list, idx, &attr, NULL);
}

static int config_term(struct perf_event_attr *attr,
		       struct parse_events__term *term)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val)	\
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		attr->sample_period = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		/*
		 * TODO uncomment when the field is available
		 * attr->branch_sample_type = term->val.num;
		 */
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
#undef CHECK_TYPE_VAL
}

static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head, int fail)
{
	struct parse_events__term *term;

	list_for_each_entry(term, head, list)
		if (config_term(attr, term) && fail)
			return -EINVAL;

	return 0;
}

int parse_events_add_numeric(struct list_head **list, int *idx,
			     u32 type, u64 config,
			     struct list_head *head_config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;

	if (head_config &&
	    config_attr(&attr, head_config, 1))
		return -EINVAL;

	return add_event(list, idx, &attr, NULL);
}

static int parse_events__is_name_term(struct parse_events__term *term)
{
	return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
}

static char *pmu_event_name(struct list_head *head_terms)
{
	struct parse_events__term *term;

	list_for_each_entry(term, head_terms, list)
		if (parse_events__is_name_term(term))
			return term->val.str;

	return NULL;
}

int parse_events_add_pmu(struct list_head **list, int *idx,
			 char *name, struct list_head *head_config)
{
	struct perf_event_attr attr;
	struct perf_pmu *pmu;

	pmu = perf_pmu__find(name);
	if (!pmu)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));

	if (perf_pmu__check_alias(pmu, head_config))
		return -EINVAL;

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	config_attr(&attr, head_config, 0);

	if (perf_pmu__config(pmu, &attr, head_config))
		return -EINVAL;

	return __add_event(list, idx, &attr, pmu_event_name(head_config),
			   pmu->cpus);
}

int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct perf_evsel *leader;

	__perf_evlist__set_leader(list);
	leader = list_entry(list->next, struct perf_evsel, node);
	leader->group_name = name ? strdup(name) : NULL;
}

void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for single event definition. Update the
	 * 'all event' list, and reinit the 'single event'
	 * list, for next event definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}

struct event_modifier {
	int eu;
	int ek;
	int eh;
	int eH;
	int eG;
	int precise;
	int exclude_GH;
};

static int get_event_modifier(struct event_modifier *mod, char *str,
			      struct perf_evsel *evsel)
{
	int eu = evsel ? evsel->attr.exclude_user : 0;
	int ek = evsel ? evsel->attr.exclude_kernel : 0;
	int eh = evsel ? evsel->attr.exclude_hv : 0;
	int eH = evsel ? evsel->attr.exclude_host : 0;
	int eG = evsel ? evsel->attr.exclude_guest : 0;
	int precise = evsel ? evsel->attr.precise_ip : 0;

	int exclude = eu | ek | eh;
	int exclude_GH = evsel ? evsel->exclude_GH : 0;

	/*
	 * We are here for a group and 'GH' was not set as an event
	 * modifier; whatever event/group modifier is given overrides
	 * the default 'GH' setup.
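	 * Clearing eH and eG below means that only the modifiers given
	 * in 'str' decide the final guest/host exclusion bits.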
	 */
	if (evsel && !exclude_GH)
		eH = eG = 0;

	memset(mod, 0, sizeof(*mod));

	while (*str) {
		if (*str == 'u') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'G') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		} else if (*str == 'H') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		} else if (*str == 'p') {
			precise++;
			/* use of precise requires exclude_guest */
			if (!exclude_GH)
				eG = 1;
		} else
			break;

		++str;
	}

	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 *  See also PERF_RECORD_MISC_EXACT_IP
	 */
	if (precise > 3)
		return -EINVAL;

	mod->eu = eu;
	mod->ek = ek;
	mod->eh = eh;
	mod->eH = eH;
	mod->eG = eG;
	mod->precise = precise;
	mod->exclude_GH = exclude_GH;
	return 0;
}

/*
 * Basic modifier sanity check to validate that a modifier string
 * contains only one instance of any modifier (apart from 'p').
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHppp") - 1))
		return -1;

	while (*p) {
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
		p++;
	}

	return 0;
}

int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
	struct perf_evsel *evsel;
	struct event_modifier mod;

	if (str == NULL)
		return 0;

	if (check_modifier(str))
		return -EINVAL;

	if (!add && get_event_modifier(&mod, str, NULL))
		return -EINVAL;

	list_for_each_entry(evsel, list, node) {

		if (add && get_event_modifier(&mod, str, evsel))
			return -EINVAL;

		evsel->attr.exclude_user   = mod.eu;
		evsel->attr.exclude_kernel = mod.ek;
		evsel->attr.exclude_hv     = mod.eh;
		evsel->attr.precise_ip     = mod.precise;
		evsel->attr.exclude_host   = mod.eH;
		evsel->attr.exclude_guest  = mod.eG;
		evsel->exclude_GH          = mod.exclude_GH;
	}

	return 0;
}

int parse_events_name(struct list_head *list, char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, list, node) {
		if (!evsel->name)
			evsel->name = strdup(name);
	}

	return 0;
}

static int parse_events__scanner(const char *str, void *data, int start_token)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(start_token, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
#endif
	ret = parse_events_parse(data, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * parse event config string, return a list of event terms.
 */
int parse_events_terms(struct list_head *terms, const char *str)
{
	struct parse_events_data__terms data = {
		.terms = NULL,
	};
	int ret;

	ret = parse_events__scanner(str, &data, PE_START_TERMS);
	if (!ret) {
		list_splice(data.terms, terms);
		free(data.terms);
		return 0;
	}

	parse_events__free_terms(data.terms);
	return ret;
}

int parse_events(struct perf_evlist *evlist, const char *str,
		 int unset __maybe_unused)
{
	struct parse_events_data__events data = {
		.list = LIST_HEAD_INIT(data.list),
		.idx  = evlist->nr_entries,
	};
	int ret;

	ret = parse_events__scanner(str, &data, PE_START_EVENTS);
	if (!ret) {
		int entries = data.idx - evlist->nr_entries;
		perf_evlist__splice_list_tail(evlist, &data.list, entries);
		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call perf_evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
	int ret = parse_events(evlist, str, unset);

	if (ret) {
		fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	return ret;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
	struct perf_evsel *last = NULL;

	if (evlist->nr_entries > 0)
		last = perf_evlist__last(evlist);

	if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"-F option should follow a -e tracepoint option\n");
		return -1;
	}

	last->filter = strdup(str);
	if (last->filter == NULL) {
		fprintf(stderr, "not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */

void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
			     bool name_only)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (debugfs_valid_mountpoint(tracing_events_path))
		return;

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		if (subsys_glob != NULL &&
		    !strglobmatch(sys_dirent.d_name, subsys_glob))
			continue;

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			if (event_glob != NULL &&
			    !strglobmatch(evt_dirent.d_name, event_glob))
				continue;

			if (name_only) {
				printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
				continue;
			}

			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			printf(" %-50s [%s]\n", evt_path,
			       event_type_descriptors[PERF_TYPE_TRACEPOINT]);
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
}

/*
 * Check whether event is in <debugfs_mount_point>/tracing/events
 */

int is_valid_tracepoint(const char *event_string)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (debugfs_valid_mountpoint(tracing_events_path))
		return 0;

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return 0;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			if (!strcmp(evt_path, event_string)) {
				closedir(evt_dir);
				closedir(sys_dir);
				return 1;
			}
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
	return 0;
}

static void __print_events_type(u8 type, struct event_symbol *syms,
				unsigned max)
{
	char name[64];
	unsigned i;

	for (i = 0; i < max; i++, syms++) {
		if (strlen(syms->alias))
			snprintf(name, sizeof(name), "%s OR %s",
				 syms->symbol, syms->alias);
		else
			snprintf(name, sizeof(name), "%s", syms->symbol);

		printf(" %-50s [%s]\n", name,
		       event_type_descriptors[type]);
	}
}

void print_events_type(u8 type)
{
	if (type == PERF_TYPE_SOFTWARE)
		__print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
	else
		__print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
}

int print_hwcache_events(const char *event_glob, bool name_only)
{
	unsigned int type, op, i, printed = 0;
	char name[64];

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (event_glob != NULL && !strglobmatch(name, event_glob))
					continue;

				if (name_only)
					printf("%s ", name);
				else
					printf(" %-50s [%s]\n", name,
					       event_type_descriptors[PERF_TYPE_HW_CACHE]);
				++printed;
			}
		}
	}

	return printed;
}

static void print_symbol_events(const char *event_glob, unsigned type,
				struct event_symbol *syms, unsigned max,
				bool name_only)
{
	unsigned i, printed = 0;
	char name[MAX_NAME_LEN];

	for (i = 0; i < max; i++, syms++) {

		if (event_glob != NULL &&
		    !(strglobmatch(syms->symbol, event_glob) ||
		      (syms->alias && strglobmatch(syms->alias, event_glob))))
			continue;

		if (name_only) {
			printf("%s ", syms->symbol);
			continue;
		}

		if (strlen(syms->alias))
			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
		else
			strncpy(name, syms->symbol, MAX_NAME_LEN);

		printf(" %-50s [%s]\n", name, event_type_descriptors[type]);

		printed++;
	}

	if (printed)
		printf("\n");
}

/*
 * Print the help text for the event symbols:
 */
void print_events(const char *event_glob, bool name_only)
{
	if (!name_only) {
		printf("\n");
		printf("List of pre-defined events (to be used in -e):\n");
	}

	print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
			    event_symbols_hw, PERF_COUNT_HW_MAX, name_only);

	print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
			    event_symbols_sw, PERF_COUNT_SW_MAX, name_only);

	print_hwcache_events(event_glob, name_only);

	if (event_glob != NULL)
		return;

	if (!name_only) {
		printf("\n");
		printf(" %-50s [%s]\n",
		       "rNNN",
		       event_type_descriptors[PERF_TYPE_RAW]);
		printf(" %-50s [%s]\n",
		       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
		       event_type_descriptors[PERF_TYPE_RAW]);
		printf(" (see 'man perf-list' on how to encode it)\n");
		printf("\n");

		printf(" %-50s [%s]\n",
		       "mem:<addr>[:access]",
		       event_type_descriptors[PERF_TYPE_BREAKPOINT]);
		printf("\n");
	}

	print_tracepoint_events(NULL, NULL, name_only);
}

int parse_events__is_hardcoded_term(struct parse_events__term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events__term **_term, int type_val,
		    int type_term, char *config,
		    char *str, u64 num)
{
	struct parse_events__term *term;

	term = zalloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	INIT_LIST_HEAD(&term->list);
	term->type_val  = type_val;
	term->type_term = type_term;
	term->config = config;

	switch (type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events__term_num(struct parse_events__term **term,
			   int type_term, char *config, u64 num)
{
	return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
			config, NULL, num);
}

int parse_events__term_str(struct parse_events__term **term,
			   int type_term, char *config, char *str)
{
	return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
			config, str, 0);
}

int parse_events__term_sym_hw(struct parse_events__term **term,
			      char *config, unsigned idx)
{
	struct event_symbol *sym;

	BUG_ON(idx >= PERF_COUNT_HW_MAX);
	sym = &event_symbols_hw[idx];

	if (config)
		return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
				PARSE_EVENTS__TERM_TYPE_USER, config,
				(char *) sym->symbol, 0);
	else
		return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
				PARSE_EVENTS__TERM_TYPE_USER,
				(char *) "event", (char *) sym->symbol, 0);
}

int parse_events__term_clone(struct parse_events__term **new,
			     struct parse_events__term *term)
{
	return new_term(new, term->type_val, term->type_term, term->config,
			term->val.str, term->val.num);
}

void parse_events__free_terms(struct list_head *terms)
{
	struct parse_events__term *term, *h;

	list_for_each_entry_safe(term, h, terms, list)
		free(term);

	free(terms);
}