1 #include <linux/hw_breakpoint.h> 2 #include "util.h" 3 #include "../perf.h" 4 #include "evlist.h" 5 #include "evsel.h" 6 #include "parse-options.h" 7 #include "parse-events.h" 8 #include "exec_cmd.h" 9 #include "string.h" 10 #include "symbol.h" 11 #include "cache.h" 12 #include "header.h" 13 #include "debug.h" 14 #include <api/fs/debugfs.h> 15 #include "parse-events-bison.h" 16 #define YY_EXTRA_TYPE int 17 #include "parse-events-flex.h" 18 #include "pmu.h" 19 #include "thread_map.h" 20 21 #define MAX_NAME_LEN 100 22 23 struct event_symbol { 24 const char *symbol; 25 const char *alias; 26 }; 27 28 #ifdef PARSER_DEBUG 29 extern int parse_events_debug; 30 #endif 31 int parse_events_parse(void *data, void *scanner); 32 33 static struct perf_pmu_event_symbol *perf_pmu_events_list; 34 /* 35 * The variable indicates the number of supported pmu event symbols. 36 * 0 means not initialized and ready to init 37 * -1 means failed to init, don't try anymore 38 * >0 is the number of supported pmu event symbols 39 */ 40 static int perf_pmu_events_list_num; 41 42 static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 43 [PERF_COUNT_HW_CPU_CYCLES] = { 44 .symbol = "cpu-cycles", 45 .alias = "cycles", 46 }, 47 [PERF_COUNT_HW_INSTRUCTIONS] = { 48 .symbol = "instructions", 49 .alias = "", 50 }, 51 [PERF_COUNT_HW_CACHE_REFERENCES] = { 52 .symbol = "cache-references", 53 .alias = "", 54 }, 55 [PERF_COUNT_HW_CACHE_MISSES] = { 56 .symbol = "cache-misses", 57 .alias = "", 58 }, 59 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 60 .symbol = "branch-instructions", 61 .alias = "branches", 62 }, 63 [PERF_COUNT_HW_BRANCH_MISSES] = { 64 .symbol = "branch-misses", 65 .alias = "", 66 }, 67 [PERF_COUNT_HW_BUS_CYCLES] = { 68 .symbol = "bus-cycles", 69 .alias = "", 70 }, 71 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 72 .symbol = "stalled-cycles-frontend", 73 .alias = "idle-cycles-frontend", 74 }, 75 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 76 .symbol = "stalled-cycles-backend", 77 .alias = "idle-cycles-backend", 78 }, 79 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 80 .symbol = "ref-cycles", 81 .alias = "", 82 }, 83 }; 84 85 static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 86 [PERF_COUNT_SW_CPU_CLOCK] = { 87 .symbol = "cpu-clock", 88 .alias = "", 89 }, 90 [PERF_COUNT_SW_TASK_CLOCK] = { 91 .symbol = "task-clock", 92 .alias = "", 93 }, 94 [PERF_COUNT_SW_PAGE_FAULTS] = { 95 .symbol = "page-faults", 96 .alias = "faults", 97 }, 98 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 99 .symbol = "context-switches", 100 .alias = "cs", 101 }, 102 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 103 .symbol = "cpu-migrations", 104 .alias = "migrations", 105 }, 106 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 107 .symbol = "minor-faults", 108 .alias = "", 109 }, 110 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 111 .symbol = "major-faults", 112 .alias = "", 113 }, 114 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 115 .symbol = "alignment-faults", 116 .alias = "", 117 }, 118 [PERF_COUNT_SW_EMULATION_FAULTS] = { 119 .symbol = "emulation-faults", 120 .alias = "", 121 }, 122 [PERF_COUNT_SW_DUMMY] = { 123 .symbol = "dummy", 124 .alias = "", 125 }, 126 }; 127 128 #define __PERF_EVENT_FIELD(config, name) \ 129 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) 130 131 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) 132 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) 133 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) 134 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) 135 136 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ 137 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ 138 if (sys_dirent.d_type == DT_DIR && \ 139 (strcmp(sys_dirent.d_name, ".")) && \ 140 (strcmp(sys_dirent.d_name, ".."))) 141 142 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) 143 { 144 char evt_path[MAXPATHLEN]; 145 int fd; 146 147 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, 148 sys_dir->d_name, evt_dir->d_name); 149 fd = open(evt_path, O_RDONLY); 150 if (fd < 0) 151 return -EINVAL; 152 close(fd); 153 154 return 0; 155 } 156 157 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ 158 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ 159 if (evt_dirent.d_type == DT_DIR && \ 160 (strcmp(evt_dirent.d_name, ".")) && \ 161 (strcmp(evt_dirent.d_name, "..")) && \ 162 (!tp_event_has_id(&sys_dirent, &evt_dirent))) 163 164 #define MAX_EVENT_LENGTH 512 165 166 167 struct tracepoint_path *tracepoint_id_to_path(u64 config) 168 { 169 struct tracepoint_path *path = NULL; 170 DIR *sys_dir, *evt_dir; 171 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 172 char id_buf[24]; 173 int fd; 174 u64 id; 175 char evt_path[MAXPATHLEN]; 176 char dir_path[MAXPATHLEN]; 177 178 if (debugfs_valid_mountpoint(tracing_events_path)) 179 return NULL; 180 181 sys_dir = opendir(tracing_events_path); 182 if (!sys_dir) 183 return NULL; 184 185 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 186 187 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 188 sys_dirent.d_name); 189 evt_dir = opendir(dir_path); 190 if (!evt_dir) 191 continue; 192 193 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 194 195 snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, 196 evt_dirent.d_name); 197 fd = open(evt_path, O_RDONLY); 198 if (fd < 0) 199 continue; 200 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 201 close(fd); 202 continue; 203 } 204 close(fd); 205 id = atoll(id_buf); 206 if (id == config) { 207 closedir(evt_dir); 208 closedir(sys_dir); 209 path = zalloc(sizeof(*path)); 210 path->system = malloc(MAX_EVENT_LENGTH); 211 if (!path->system) { 212 free(path); 213 return NULL; 214 } 215 path->name = malloc(MAX_EVENT_LENGTH); 216 if (!path->name) { 217 zfree(&path->system); 218 free(path); 219 return NULL; 220 } 221 strncpy(path->system, sys_dirent.d_name, 222 MAX_EVENT_LENGTH); 223 strncpy(path->name, evt_dirent.d_name, 224 MAX_EVENT_LENGTH); 225 return path; 226 } 227 } 228 closedir(evt_dir); 229 } 230 231 closedir(sys_dir); 232 return NULL; 233 } 234 235 struct tracepoint_path *tracepoint_name_to_path(const char *name) 236 { 237 struct tracepoint_path *path = zalloc(sizeof(*path)); 238 char *str = strchr(name, ':'); 239 240 if (path == NULL || str == NULL) { 241 free(path); 242 return NULL; 243 } 244 245 path->system = strndup(name, str - name); 246 path->name = strdup(str+1); 247 248 if (path->system == NULL || path->name == NULL) { 249 zfree(&path->system); 250 zfree(&path->name); 251 free(path); 252 path = NULL; 253 } 254 255 return path; 256 } 257 258 const char *event_type(int type) 259 { 260 switch (type) { 261 case PERF_TYPE_HARDWARE: 262 return "hardware"; 263 264 case PERF_TYPE_SOFTWARE: 265 return "software"; 266 267 case PERF_TYPE_TRACEPOINT: 268 return "tracepoint"; 269 270 case PERF_TYPE_HW_CACHE: 271 return "hardware-cache"; 272 273 default: 274 break; 275 } 276 277 return "unknown"; 278 } 279 280 281 282 static struct perf_evsel * 283 __add_event(struct list_head *list, int *idx, 284 struct perf_event_attr *attr, 285 char *name, struct cpu_map *cpus) 286 { 287 struct perf_evsel *evsel; 288 289 event_attr_init(attr); 290 291 evsel = perf_evsel__new_idx(attr, (*idx)++); 292 if (!evsel) 293 return NULL; 294 295 evsel->cpus = cpus; 296 if (name) 297 evsel->name = strdup(name); 298 list_add_tail(&evsel->node, list); 299 return evsel; 300 } 301 302 static int add_event(struct list_head *list, int *idx, 303 struct perf_event_attr *attr, char *name) 304 { 305 return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM; 306 } 307 308 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) 309 { 310 int i, j; 311 int n, longest = -1; 312 313 for (i = 0; i < size; i++) { 314 for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) { 315 n = strlen(names[i][j]); 316 if (n > longest && !strncasecmp(str, names[i][j], n)) 317 longest = n; 318 } 319 if (longest > 0) 320 return i; 321 } 322 323 return -1; 324 } 325 326 int parse_events_add_cache(struct list_head *list, int *idx, 327 char *type, char *op_result1, char *op_result2) 328 { 329 struct perf_event_attr attr; 330 char name[MAX_NAME_LEN]; 331 int cache_type = -1, cache_op = -1, cache_result = -1; 332 char *op_result[2] = { op_result1, op_result2 }; 333 int i, n; 334 335 /* 336 * No fallback - if we cannot get a clear cache type 337 * then bail out: 338 */ 339 cache_type = parse_aliases(type, perf_evsel__hw_cache, 340 PERF_COUNT_HW_CACHE_MAX); 341 if (cache_type == -1) 342 return -EINVAL; 343 344 n = snprintf(name, MAX_NAME_LEN, "%s", type); 345 346 for (i = 0; (i < 2) && (op_result[i]); i++) { 347 char *str = op_result[i]; 348 349 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); 350 351 if (cache_op == -1) { 352 cache_op = parse_aliases(str, perf_evsel__hw_cache_op, 353 PERF_COUNT_HW_CACHE_OP_MAX); 354 if (cache_op >= 0) { 355 if (!perf_evsel__is_cache_op_valid(cache_type, cache_op)) 356 return -EINVAL; 357 continue; 358 } 359 } 360 361 if (cache_result == -1) { 362 cache_result = parse_aliases(str, perf_evsel__hw_cache_result, 363 PERF_COUNT_HW_CACHE_RESULT_MAX); 364 if (cache_result >= 0) 365 continue; 366 } 367 } 368 369 /* 370 * Fall back to reads: 371 */ 372 if (cache_op == -1) 373 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 374 375 /* 376 * Fall back to accesses: 377 */ 378 if (cache_result == -1) 379 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 380 381 memset(&attr, 0, sizeof(attr)); 382 attr.config = cache_type | (cache_op << 8) | (cache_result << 16); 383 attr.type = PERF_TYPE_HW_CACHE; 384 return add_event(list, idx, &attr, name); 385 } 386 387 static int add_tracepoint(struct list_head *list, int *idx, 388 char *sys_name, char *evt_name) 389 { 390 struct perf_evsel *evsel; 391 392 evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); 393 if (!evsel) 394 return -ENOMEM; 395 396 list_add_tail(&evsel->node, list); 397 398 return 0; 399 } 400 401 static int add_tracepoint_multi_event(struct list_head *list, int *idx, 402 char *sys_name, char *evt_name) 403 { 404 char evt_path[MAXPATHLEN]; 405 struct dirent *evt_ent; 406 DIR *evt_dir; 407 int ret = 0; 408 409 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); 410 evt_dir = opendir(evt_path); 411 if (!evt_dir) { 412 perror("Can't open event dir"); 413 return -1; 414 } 415 416 while (!ret && (evt_ent = readdir(evt_dir))) { 417 if (!strcmp(evt_ent->d_name, ".") 418 || !strcmp(evt_ent->d_name, "..") 419 || !strcmp(evt_ent->d_name, "enable") 420 || !strcmp(evt_ent->d_name, "filter")) 421 continue; 422 423 if (!strglobmatch(evt_ent->d_name, evt_name)) 424 continue; 425 426 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); 427 } 428 429 closedir(evt_dir); 430 return ret; 431 } 432 433 static int add_tracepoint_event(struct list_head *list, int *idx, 434 char *sys_name, char *evt_name) 435 { 436 return strpbrk(evt_name, "*?") ? 437 add_tracepoint_multi_event(list, idx, sys_name, evt_name) : 438 add_tracepoint(list, idx, sys_name, evt_name); 439 } 440 441 static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 442 char *sys_name, char *evt_name) 443 { 444 struct dirent *events_ent; 445 DIR *events_dir; 446 int ret = 0; 447 448 events_dir = opendir(tracing_events_path); 449 if (!events_dir) { 450 perror("Can't open event dir"); 451 return -1; 452 } 453 454 while (!ret && (events_ent = readdir(events_dir))) { 455 if (!strcmp(events_ent->d_name, ".") 456 || !strcmp(events_ent->d_name, "..") 457 || !strcmp(events_ent->d_name, "enable") 458 || !strcmp(events_ent->d_name, "header_event") 459 || !strcmp(events_ent->d_name, "header_page")) 460 continue; 461 462 if (!strglobmatch(events_ent->d_name, sys_name)) 463 continue; 464 465 ret = add_tracepoint_event(list, idx, events_ent->d_name, 466 evt_name); 467 } 468 469 closedir(events_dir); 470 return ret; 471 } 472 473 int parse_events_add_tracepoint(struct list_head *list, int *idx, 474 char *sys, char *event) 475 { 476 int ret; 477 478 ret = debugfs_valid_mountpoint(tracing_events_path); 479 if (ret) 480 return ret; 481 482 if (strpbrk(sys, "*?")) 483 return add_tracepoint_multi_sys(list, idx, sys, event); 484 else 485 return add_tracepoint_event(list, idx, sys, event); 486 } 487 488 static int 489 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 490 { 491 int i; 492 493 for (i = 0; i < 3; i++) { 494 if (!type || !type[i]) 495 break; 496 497 #define CHECK_SET_TYPE(bit) \ 498 do { \ 499 if (attr->bp_type & bit) \ 500 return -EINVAL; \ 501 else \ 502 attr->bp_type |= bit; \ 503 } while (0) 504 505 switch (type[i]) { 506 case 'r': 507 CHECK_SET_TYPE(HW_BREAKPOINT_R); 508 break; 509 case 'w': 510 CHECK_SET_TYPE(HW_BREAKPOINT_W); 511 break; 512 case 'x': 513 CHECK_SET_TYPE(HW_BREAKPOINT_X); 514 break; 515 default: 516 return -EINVAL; 517 } 518 } 519 520 #undef CHECK_SET_TYPE 521 522 if (!attr->bp_type) /* Default */ 523 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 524 525 return 0; 526 } 527 528 int parse_events_add_breakpoint(struct list_head *list, int *idx, 529 void *ptr, char *type) 530 { 531 struct perf_event_attr attr; 532 533 memset(&attr, 0, sizeof(attr)); 534 attr.bp_addr = (unsigned long) ptr; 535 536 if (parse_breakpoint_type(type, &attr)) 537 return -EINVAL; 538 539 /* 540 * We should find a nice way to override the access length 541 * Provide some defaults for now 542 */ 543 if (attr.bp_type == HW_BREAKPOINT_X) 544 attr.bp_len = sizeof(long); 545 else 546 attr.bp_len = HW_BREAKPOINT_LEN_4; 547 548 attr.type = PERF_TYPE_BREAKPOINT; 549 attr.sample_period = 1; 550 551 return add_event(list, idx, &attr, NULL); 552 } 553 554 static int config_term(struct perf_event_attr *attr, 555 struct parse_events_term *term) 556 { 557 #define CHECK_TYPE_VAL(type) \ 558 do { \ 559 if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \ 560 return -EINVAL; \ 561 } while (0) 562 563 switch (term->type_term) { 564 case PARSE_EVENTS__TERM_TYPE_CONFIG: 565 CHECK_TYPE_VAL(NUM); 566 attr->config = term->val.num; 567 break; 568 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 569 CHECK_TYPE_VAL(NUM); 570 attr->config1 = term->val.num; 571 break; 572 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 573 CHECK_TYPE_VAL(NUM); 574 attr->config2 = term->val.num; 575 break; 576 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 577 CHECK_TYPE_VAL(NUM); 578 attr->sample_period = term->val.num; 579 break; 580 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 581 /* 582 * TODO uncomment when the field is available 583 * attr->branch_sample_type = term->val.num; 584 */ 585 break; 586 case PARSE_EVENTS__TERM_TYPE_NAME: 587 CHECK_TYPE_VAL(STR); 588 break; 589 default: 590 return -EINVAL; 591 } 592 593 return 0; 594 #undef CHECK_TYPE_VAL 595 } 596 597 static int config_attr(struct perf_event_attr *attr, 598 struct list_head *head, int fail) 599 { 600 struct parse_events_term *term; 601 602 list_for_each_entry(term, head, list) 603 if (config_term(attr, term) && fail) 604 return -EINVAL; 605 606 return 0; 607 } 608 609 int parse_events_add_numeric(struct list_head *list, int *idx, 610 u32 type, u64 config, 611 struct list_head *head_config) 612 { 613 struct perf_event_attr attr; 614 615 memset(&attr, 0, sizeof(attr)); 616 attr.type = type; 617 attr.config = config; 618 619 if (head_config && 620 config_attr(&attr, head_config, 1)) 621 return -EINVAL; 622 623 return add_event(list, idx, &attr, NULL); 624 } 625 626 static int parse_events__is_name_term(struct parse_events_term *term) 627 { 628 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; 629 } 630 631 static char *pmu_event_name(struct list_head *head_terms) 632 { 633 struct parse_events_term *term; 634 635 list_for_each_entry(term, head_terms, list) 636 if (parse_events__is_name_term(term)) 637 return term->val.str; 638 639 return NULL; 640 } 641 642 int parse_events_add_pmu(struct list_head *list, int *idx, 643 char *name, struct list_head *head_config) 644 { 645 struct perf_event_attr attr; 646 struct perf_pmu_info info; 647 struct perf_pmu *pmu; 648 struct perf_evsel *evsel; 649 650 pmu = perf_pmu__find(name); 651 if (!pmu) 652 return -EINVAL; 653 654 if (pmu->default_config) { 655 memcpy(&attr, pmu->default_config, 656 sizeof(struct perf_event_attr)); 657 } else { 658 memset(&attr, 0, sizeof(attr)); 659 } 660 661 if (!head_config) { 662 attr.type = pmu->type; 663 evsel = __add_event(list, idx, &attr, NULL, pmu->cpus); 664 return evsel ? 0 : -ENOMEM; 665 } 666 667 if (perf_pmu__check_alias(pmu, head_config, &info)) 668 return -EINVAL; 669 670 /* 671 * Configure hardcoded terms first, no need to check 672 * return value when called with fail == 0 ;) 673 */ 674 config_attr(&attr, head_config, 0); 675 676 if (perf_pmu__config(pmu, &attr, head_config)) 677 return -EINVAL; 678 679 evsel = __add_event(list, idx, &attr, pmu_event_name(head_config), 680 pmu->cpus); 681 if (evsel) { 682 evsel->unit = info.unit; 683 evsel->scale = info.scale; 684 } 685 686 return evsel ? 0 : -ENOMEM; 687 } 688 689 int parse_events__modifier_group(struct list_head *list, 690 char *event_mod) 691 { 692 return parse_events__modifier_event(list, event_mod, true); 693 } 694 695 void parse_events__set_leader(char *name, struct list_head *list) 696 { 697 struct perf_evsel *leader; 698 699 __perf_evlist__set_leader(list); 700 leader = list_entry(list->next, struct perf_evsel, node); 701 leader->group_name = name ? strdup(name) : NULL; 702 } 703 704 /* list_event is assumed to point to malloc'ed memory */ 705 void parse_events_update_lists(struct list_head *list_event, 706 struct list_head *list_all) 707 { 708 /* 709 * Called for single event definition. Update the 710 * 'all event' list, and reinit the 'single event' 711 * list, for next event definition. 712 */ 713 list_splice_tail(list_event, list_all); 714 free(list_event); 715 } 716 717 struct event_modifier { 718 int eu; 719 int ek; 720 int eh; 721 int eH; 722 int eG; 723 int precise; 724 int exclude_GH; 725 int sample_read; 726 int pinned; 727 }; 728 729 static int get_event_modifier(struct event_modifier *mod, char *str, 730 struct perf_evsel *evsel) 731 { 732 int eu = evsel ? evsel->attr.exclude_user : 0; 733 int ek = evsel ? evsel->attr.exclude_kernel : 0; 734 int eh = evsel ? evsel->attr.exclude_hv : 0; 735 int eH = evsel ? evsel->attr.exclude_host : 0; 736 int eG = evsel ? evsel->attr.exclude_guest : 0; 737 int precise = evsel ? evsel->attr.precise_ip : 0; 738 int sample_read = 0; 739 int pinned = evsel ? evsel->attr.pinned : 0; 740 741 int exclude = eu | ek | eh; 742 int exclude_GH = evsel ? evsel->exclude_GH : 0; 743 744 memset(mod, 0, sizeof(*mod)); 745 746 while (*str) { 747 if (*str == 'u') { 748 if (!exclude) 749 exclude = eu = ek = eh = 1; 750 eu = 0; 751 } else if (*str == 'k') { 752 if (!exclude) 753 exclude = eu = ek = eh = 1; 754 ek = 0; 755 } else if (*str == 'h') { 756 if (!exclude) 757 exclude = eu = ek = eh = 1; 758 eh = 0; 759 } else if (*str == 'G') { 760 if (!exclude_GH) 761 exclude_GH = eG = eH = 1; 762 eG = 0; 763 } else if (*str == 'H') { 764 if (!exclude_GH) 765 exclude_GH = eG = eH = 1; 766 eH = 0; 767 } else if (*str == 'p') { 768 precise++; 769 /* use of precise requires exclude_guest */ 770 if (!exclude_GH) 771 eG = 1; 772 } else if (*str == 'S') { 773 sample_read = 1; 774 } else if (*str == 'D') { 775 pinned = 1; 776 } else 777 break; 778 779 ++str; 780 } 781 782 /* 783 * precise ip: 784 * 785 * 0 - SAMPLE_IP can have arbitrary skid 786 * 1 - SAMPLE_IP must have constant skid 787 * 2 - SAMPLE_IP requested to have 0 skid 788 * 3 - SAMPLE_IP must have 0 skid 789 * 790 * See also PERF_RECORD_MISC_EXACT_IP 791 */ 792 if (precise > 3) 793 return -EINVAL; 794 795 mod->eu = eu; 796 mod->ek = ek; 797 mod->eh = eh; 798 mod->eH = eH; 799 mod->eG = eG; 800 mod->precise = precise; 801 mod->exclude_GH = exclude_GH; 802 mod->sample_read = sample_read; 803 mod->pinned = pinned; 804 805 return 0; 806 } 807 808 /* 809 * Basic modifier sanity check to validate it contains only one 810 * instance of any modifier (apart from 'p') present. 811 */ 812 static int check_modifier(char *str) 813 { 814 char *p = str; 815 816 /* The sizeof includes 0 byte as well. */ 817 if (strlen(str) > (sizeof("ukhGHpppSD") - 1)) 818 return -1; 819 820 while (*p) { 821 if (*p != 'p' && strchr(p + 1, *p)) 822 return -1; 823 p++; 824 } 825 826 return 0; 827 } 828 829 int parse_events__modifier_event(struct list_head *list, char *str, bool add) 830 { 831 struct perf_evsel *evsel; 832 struct event_modifier mod; 833 834 if (str == NULL) 835 return 0; 836 837 if (check_modifier(str)) 838 return -EINVAL; 839 840 if (!add && get_event_modifier(&mod, str, NULL)) 841 return -EINVAL; 842 843 __evlist__for_each(list, evsel) { 844 if (add && get_event_modifier(&mod, str, evsel)) 845 return -EINVAL; 846 847 evsel->attr.exclude_user = mod.eu; 848 evsel->attr.exclude_kernel = mod.ek; 849 evsel->attr.exclude_hv = mod.eh; 850 evsel->attr.precise_ip = mod.precise; 851 evsel->attr.exclude_host = mod.eH; 852 evsel->attr.exclude_guest = mod.eG; 853 evsel->exclude_GH = mod.exclude_GH; 854 evsel->sample_read = mod.sample_read; 855 856 if (perf_evsel__is_group_leader(evsel)) 857 evsel->attr.pinned = mod.pinned; 858 } 859 860 return 0; 861 } 862 863 int parse_events_name(struct list_head *list, char *name) 864 { 865 struct perf_evsel *evsel; 866 867 __evlist__for_each(list, evsel) { 868 if (!evsel->name) 869 evsel->name = strdup(name); 870 } 871 872 return 0; 873 } 874 875 static int 876 comp_pmu(const void *p1, const void *p2) 877 { 878 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; 879 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; 880 881 return strcmp(pmu1->symbol, pmu2->symbol); 882 } 883 884 static void perf_pmu__parse_cleanup(void) 885 { 886 if (perf_pmu_events_list_num > 0) { 887 struct perf_pmu_event_symbol *p; 888 int i; 889 890 for (i = 0; i < perf_pmu_events_list_num; i++) { 891 p = perf_pmu_events_list + i; 892 free(p->symbol); 893 } 894 free(perf_pmu_events_list); 895 perf_pmu_events_list = NULL; 896 perf_pmu_events_list_num = 0; 897 } 898 } 899 900 #define SET_SYMBOL(str, stype) \ 901 do { \ 902 p->symbol = str; \ 903 if (!p->symbol) \ 904 goto err; \ 905 p->type = stype; \ 906 } while (0) 907 908 /* 909 * Read the pmu events list from sysfs 910 * Save it into perf_pmu_events_list 911 */ 912 static void perf_pmu__parse_init(void) 913 { 914 915 struct perf_pmu *pmu = NULL; 916 struct perf_pmu_alias *alias; 917 int len = 0; 918 919 pmu = perf_pmu__find("cpu"); 920 if ((pmu == NULL) || list_empty(&pmu->aliases)) { 921 perf_pmu_events_list_num = -1; 922 return; 923 } 924 list_for_each_entry(alias, &pmu->aliases, list) { 925 if (strchr(alias->name, '-')) 926 len++; 927 len++; 928 } 929 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); 930 if (!perf_pmu_events_list) 931 return; 932 perf_pmu_events_list_num = len; 933 934 len = 0; 935 list_for_each_entry(alias, &pmu->aliases, list) { 936 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; 937 char *tmp = strchr(alias->name, '-'); 938 939 if (tmp != NULL) { 940 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 941 PMU_EVENT_SYMBOL_PREFIX); 942 p++; 943 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); 944 len += 2; 945 } else { 946 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); 947 len++; 948 } 949 } 950 qsort(perf_pmu_events_list, len, 951 sizeof(struct perf_pmu_event_symbol), comp_pmu); 952 953 return; 954 err: 955 perf_pmu__parse_cleanup(); 956 } 957 958 enum perf_pmu_event_symbol_type 959 perf_pmu__parse_check(const char *name) 960 { 961 struct perf_pmu_event_symbol p, *r; 962 963 /* scan kernel pmu events from sysfs if needed */ 964 if (perf_pmu_events_list_num == 0) 965 perf_pmu__parse_init(); 966 /* 967 * name "cpu" could be prefix of cpu-cycles or cpu// events. 968 * cpu-cycles has been handled by hardcode. 969 * So it must be cpu// events, not kernel pmu event. 970 */ 971 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) 972 return PMU_EVENT_SYMBOL_ERR; 973 974 p.symbol = strdup(name); 975 r = bsearch(&p, perf_pmu_events_list, 976 (size_t) perf_pmu_events_list_num, 977 sizeof(struct perf_pmu_event_symbol), comp_pmu); 978 free(p.symbol); 979 return r ? r->type : PMU_EVENT_SYMBOL_ERR; 980 } 981 982 static int parse_events__scanner(const char *str, void *data, int start_token) 983 { 984 YY_BUFFER_STATE buffer; 985 void *scanner; 986 int ret; 987 988 ret = parse_events_lex_init_extra(start_token, &scanner); 989 if (ret) 990 return ret; 991 992 buffer = parse_events__scan_string(str, scanner); 993 994 #ifdef PARSER_DEBUG 995 parse_events_debug = 1; 996 #endif 997 ret = parse_events_parse(data, scanner); 998 999 parse_events__flush_buffer(buffer, scanner); 1000 parse_events__delete_buffer(buffer, scanner); 1001 parse_events_lex_destroy(scanner); 1002 return ret; 1003 } 1004 1005 /* 1006 * parse event config string, return a list of event terms. 1007 */ 1008 int parse_events_terms(struct list_head *terms, const char *str) 1009 { 1010 struct parse_events_terms data = { 1011 .terms = NULL, 1012 }; 1013 int ret; 1014 1015 ret = parse_events__scanner(str, &data, PE_START_TERMS); 1016 if (!ret) { 1017 list_splice(data.terms, terms); 1018 zfree(&data.terms); 1019 return 0; 1020 } 1021 1022 if (data.terms) 1023 parse_events__free_terms(data.terms); 1024 return ret; 1025 } 1026 1027 int parse_events(struct perf_evlist *evlist, const char *str) 1028 { 1029 struct parse_events_evlist data = { 1030 .list = LIST_HEAD_INIT(data.list), 1031 .idx = evlist->nr_entries, 1032 }; 1033 int ret; 1034 1035 ret = parse_events__scanner(str, &data, PE_START_EVENTS); 1036 perf_pmu__parse_cleanup(); 1037 if (!ret) { 1038 int entries = data.idx - evlist->nr_entries; 1039 perf_evlist__splice_list_tail(evlist, &data.list, entries); 1040 evlist->nr_groups += data.nr_groups; 1041 return 0; 1042 } 1043 1044 /* 1045 * There are 2 users - builtin-record and builtin-test objects. 1046 * Both call perf_evlist__delete in case of error, so we dont 1047 * need to bother. 1048 */ 1049 return ret; 1050 } 1051 1052 int parse_events_option(const struct option *opt, const char *str, 1053 int unset __maybe_unused) 1054 { 1055 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 1056 int ret = parse_events(evlist, str); 1057 1058 if (ret) { 1059 fprintf(stderr, "invalid or unsupported event: '%s'\n", str); 1060 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 1061 } 1062 return ret; 1063 } 1064 1065 int parse_filter(const struct option *opt, const char *str, 1066 int unset __maybe_unused) 1067 { 1068 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 1069 struct perf_evsel *last = NULL; 1070 1071 if (evlist->nr_entries > 0) 1072 last = perf_evlist__last(evlist); 1073 1074 if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { 1075 fprintf(stderr, 1076 "--filter option should follow a -e tracepoint option\n"); 1077 return -1; 1078 } 1079 1080 last->filter = strdup(str); 1081 if (last->filter == NULL) { 1082 fprintf(stderr, "not enough memory to hold filter string\n"); 1083 return -1; 1084 } 1085 1086 return 0; 1087 } 1088 1089 static const char * const event_type_descriptors[] = { 1090 "Hardware event", 1091 "Software event", 1092 "Tracepoint event", 1093 "Hardware cache event", 1094 "Raw hardware event descriptor", 1095 "Hardware breakpoint", 1096 }; 1097 1098 /* 1099 * Print the events from <debugfs_mount_point>/tracing/events 1100 */ 1101 1102 void print_tracepoint_events(const char *subsys_glob, const char *event_glob, 1103 bool name_only) 1104 { 1105 DIR *sys_dir, *evt_dir; 1106 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 1107 char evt_path[MAXPATHLEN]; 1108 char dir_path[MAXPATHLEN]; 1109 char sbuf[STRERR_BUFSIZE]; 1110 1111 if (debugfs_valid_mountpoint(tracing_events_path)) { 1112 printf(" [ Tracepoints not available: %s ]\n", 1113 strerror_r(errno, sbuf, sizeof(sbuf))); 1114 return; 1115 } 1116 1117 sys_dir = opendir(tracing_events_path); 1118 if (!sys_dir) 1119 return; 1120 1121 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 1122 if (subsys_glob != NULL && 1123 !strglobmatch(sys_dirent.d_name, subsys_glob)) 1124 continue; 1125 1126 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 1127 sys_dirent.d_name); 1128 evt_dir = opendir(dir_path); 1129 if (!evt_dir) 1130 continue; 1131 1132 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 1133 if (event_glob != NULL && 1134 !strglobmatch(evt_dirent.d_name, event_glob)) 1135 continue; 1136 1137 if (name_only) { 1138 printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name); 1139 continue; 1140 } 1141 1142 snprintf(evt_path, MAXPATHLEN, "%s:%s", 1143 sys_dirent.d_name, evt_dirent.d_name); 1144 printf(" %-50s [%s]\n", evt_path, 1145 event_type_descriptors[PERF_TYPE_TRACEPOINT]); 1146 } 1147 closedir(evt_dir); 1148 } 1149 closedir(sys_dir); 1150 } 1151 1152 /* 1153 * Check whether event is in <debugfs_mount_point>/tracing/events 1154 */ 1155 1156 int is_valid_tracepoint(const char *event_string) 1157 { 1158 DIR *sys_dir, *evt_dir; 1159 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 1160 char evt_path[MAXPATHLEN]; 1161 char dir_path[MAXPATHLEN]; 1162 1163 if (debugfs_valid_mountpoint(tracing_events_path)) 1164 return 0; 1165 1166 sys_dir = opendir(tracing_events_path); 1167 if (!sys_dir) 1168 return 0; 1169 1170 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 1171 1172 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 1173 sys_dirent.d_name); 1174 evt_dir = opendir(dir_path); 1175 if (!evt_dir) 1176 continue; 1177 1178 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 1179 snprintf(evt_path, MAXPATHLEN, "%s:%s", 1180 sys_dirent.d_name, evt_dirent.d_name); 1181 if (!strcmp(evt_path, event_string)) { 1182 closedir(evt_dir); 1183 closedir(sys_dir); 1184 return 1; 1185 } 1186 } 1187 closedir(evt_dir); 1188 } 1189 closedir(sys_dir); 1190 return 0; 1191 } 1192 1193 static bool is_event_supported(u8 type, unsigned config) 1194 { 1195 bool ret = true; 1196 int open_return; 1197 struct perf_evsel *evsel; 1198 struct perf_event_attr attr = { 1199 .type = type, 1200 .config = config, 1201 .disabled = 1, 1202 }; 1203 struct { 1204 struct thread_map map; 1205 int threads[1]; 1206 } tmap = { 1207 .map.nr = 1, 1208 .threads = { 0 }, 1209 }; 1210 1211 evsel = perf_evsel__new(&attr); 1212 if (evsel) { 1213 open_return = perf_evsel__open(evsel, NULL, &tmap.map); 1214 ret = open_return >= 0; 1215 1216 if (open_return == -EACCES) { 1217 /* 1218 * This happens if the paranoid value 1219 * /proc/sys/kernel/perf_event_paranoid is set to 2 1220 * Re-run with exclude_kernel set; we don't do that 1221 * by default as some ARM machines do not support it. 1222 * 1223 */ 1224 evsel->attr.exclude_kernel = 1; 1225 ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; 1226 } 1227 perf_evsel__delete(evsel); 1228 } 1229 1230 return ret; 1231 } 1232 1233 static void __print_events_type(u8 type, struct event_symbol *syms, 1234 unsigned max) 1235 { 1236 char name[64]; 1237 unsigned i; 1238 1239 for (i = 0; i < max ; i++, syms++) { 1240 if (!is_event_supported(type, i)) 1241 continue; 1242 1243 if (strlen(syms->alias)) 1244 snprintf(name, sizeof(name), "%s OR %s", 1245 syms->symbol, syms->alias); 1246 else 1247 snprintf(name, sizeof(name), "%s", syms->symbol); 1248 1249 printf(" %-50s [%s]\n", name, event_type_descriptors[type]); 1250 } 1251 } 1252 1253 void print_events_type(u8 type) 1254 { 1255 if (type == PERF_TYPE_SOFTWARE) 1256 __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX); 1257 else 1258 __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); 1259 } 1260 1261 int print_hwcache_events(const char *event_glob, bool name_only) 1262 { 1263 unsigned int type, op, i, printed = 0; 1264 char name[64]; 1265 1266 for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { 1267 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { 1268 /* skip invalid cache type */ 1269 if (!perf_evsel__is_cache_op_valid(type, op)) 1270 continue; 1271 1272 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 1273 __perf_evsel__hw_cache_type_op_res_name(type, op, i, 1274 name, sizeof(name)); 1275 if (event_glob != NULL && !strglobmatch(name, event_glob)) 1276 continue; 1277 1278 if (!is_event_supported(PERF_TYPE_HW_CACHE, 1279 type | (op << 8) | (i << 16))) 1280 continue; 1281 1282 if (name_only) 1283 printf("%s ", name); 1284 else 1285 printf(" %-50s [%s]\n", name, 1286 event_type_descriptors[PERF_TYPE_HW_CACHE]); 1287 ++printed; 1288 } 1289 } 1290 } 1291 1292 if (printed) 1293 printf("\n"); 1294 return printed; 1295 } 1296 1297 static void print_symbol_events(const char *event_glob, unsigned type, 1298 struct event_symbol *syms, unsigned max, 1299 bool name_only) 1300 { 1301 unsigned i, printed = 0; 1302 char name[MAX_NAME_LEN]; 1303 1304 for (i = 0; i < max; i++, syms++) { 1305 1306 if (event_glob != NULL && 1307 !(strglobmatch(syms->symbol, event_glob) || 1308 (syms->alias && strglobmatch(syms->alias, event_glob)))) 1309 continue; 1310 1311 if (!is_event_supported(type, i)) 1312 continue; 1313 1314 if (name_only) { 1315 printf("%s ", syms->symbol); 1316 continue; 1317 } 1318 1319 if (strlen(syms->alias)) 1320 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); 1321 else 1322 strncpy(name, syms->symbol, MAX_NAME_LEN); 1323 1324 printf(" %-50s [%s]\n", name, event_type_descriptors[type]); 1325 1326 printed++; 1327 } 1328 1329 if (printed) 1330 printf("\n"); 1331 } 1332 1333 /* 1334 * Print the help text for the event symbols: 1335 */ 1336 void print_events(const char *event_glob, bool name_only) 1337 { 1338 if (!name_only) { 1339 printf("\n"); 1340 printf("List of pre-defined events (to be used in -e):\n"); 1341 } 1342 1343 print_symbol_events(event_glob, PERF_TYPE_HARDWARE, 1344 event_symbols_hw, PERF_COUNT_HW_MAX, name_only); 1345 1346 print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, 1347 event_symbols_sw, PERF_COUNT_SW_MAX, name_only); 1348 1349 print_hwcache_events(event_glob, name_only); 1350 1351 print_pmu_events(event_glob, name_only); 1352 1353 if (event_glob != NULL) 1354 return; 1355 1356 if (!name_only) { 1357 printf(" %-50s [%s]\n", 1358 "rNNN", 1359 event_type_descriptors[PERF_TYPE_RAW]); 1360 printf(" %-50s [%s]\n", 1361 "cpu/t1=v1[,t2=v2,t3 ...]/modifier", 1362 event_type_descriptors[PERF_TYPE_RAW]); 1363 printf(" (see 'man perf-list' on how to encode it)\n"); 1364 printf("\n"); 1365 1366 printf(" %-50s [%s]\n", 1367 "mem:<addr>[:access]", 1368 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1369 printf("\n"); 1370 } 1371 1372 print_tracepoint_events(NULL, NULL, name_only); 1373 } 1374 1375 int parse_events__is_hardcoded_term(struct parse_events_term *term) 1376 { 1377 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 1378 } 1379 1380 static int new_term(struct parse_events_term **_term, int type_val, 1381 int type_term, char *config, 1382 char *str, u64 num) 1383 { 1384 struct parse_events_term *term; 1385 1386 term = zalloc(sizeof(*term)); 1387 if (!term) 1388 return -ENOMEM; 1389 1390 INIT_LIST_HEAD(&term->list); 1391 term->type_val = type_val; 1392 term->type_term = type_term; 1393 term->config = config; 1394 1395 switch (type_val) { 1396 case PARSE_EVENTS__TERM_TYPE_NUM: 1397 term->val.num = num; 1398 break; 1399 case PARSE_EVENTS__TERM_TYPE_STR: 1400 term->val.str = str; 1401 break; 1402 default: 1403 free(term); 1404 return -EINVAL; 1405 } 1406 1407 *_term = term; 1408 return 0; 1409 } 1410 1411 int parse_events_term__num(struct parse_events_term **term, 1412 int type_term, char *config, u64 num) 1413 { 1414 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, 1415 config, NULL, num); 1416 } 1417 1418 int parse_events_term__str(struct parse_events_term **term, 1419 int type_term, char *config, char *str) 1420 { 1421 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, 1422 config, str, 0); 1423 } 1424 1425 int parse_events_term__sym_hw(struct parse_events_term **term, 1426 char *config, unsigned idx) 1427 { 1428 struct event_symbol *sym; 1429 1430 BUG_ON(idx >= PERF_COUNT_HW_MAX); 1431 sym = &event_symbols_hw[idx]; 1432 1433 if (config) 1434 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, 1435 PARSE_EVENTS__TERM_TYPE_USER, config, 1436 (char *) sym->symbol, 0); 1437 else 1438 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, 1439 PARSE_EVENTS__TERM_TYPE_USER, 1440 (char *) "event", (char *) sym->symbol, 0); 1441 } 1442 1443 int parse_events_term__clone(struct parse_events_term **new, 1444 struct parse_events_term *term) 1445 { 1446 return new_term(new, term->type_val, term->type_term, term->config, 1447 term->val.str, term->val.num); 1448 } 1449 1450 void parse_events__free_terms(struct list_head *terms) 1451 { 1452 struct parse_events_term *term, *h; 1453 1454 list_for_each_entry_safe(term, h, terms, list) 1455 free(term); 1456 } 1457