1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <byteswap.h> 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <linux/bitops.h> 13 #include <api/fs/fs.h> 14 #include <api/fs/tracing_path.h> 15 #include <traceevent/event-parse.h> 16 #include <linux/hw_breakpoint.h> 17 #include <linux/perf_event.h> 18 #include <linux/compiler.h> 19 #include <linux/err.h> 20 #include <linux/zalloc.h> 21 #include <sys/ioctl.h> 22 #include <sys/resource.h> 23 #include <sys/types.h> 24 #include <dirent.h> 25 #include <stdlib.h> 26 #include <perf/evsel.h> 27 #include "asm/bug.h" 28 #include "bpf_counter.h" 29 #include "callchain.h" 30 #include "cgroup.h" 31 #include "counts.h" 32 #include "event.h" 33 #include "evsel.h" 34 #include "util/env.h" 35 #include "util/evsel_config.h" 36 #include "util/evsel_fprintf.h" 37 #include "evlist.h" 38 #include <perf/cpumap.h> 39 #include "thread_map.h" 40 #include "target.h" 41 #include "perf_regs.h" 42 #include "record.h" 43 #include "debug.h" 44 #include "trace-event.h" 45 #include "stat.h" 46 #include "string2.h" 47 #include "memswap.h" 48 #include "util.h" 49 #include "hashmap.h" 50 #include "pmu-hybrid.h" 51 #include "../perf-sys.h" 52 #include "util/parse-branch-options.h" 53 #include <internal/xyarray.h> 54 #include <internal/lib.h> 55 56 #include <linux/ctype.h> 57 58 struct perf_missing_features perf_missing_features; 59 60 static clockid_t clockid; 61 62 static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = { 63 NULL, 64 "duration_time", 65 "user_time", 66 "system_time", 67 }; 68 69 const char *perf_tool_event__to_str(enum perf_tool_event ev) 70 { 71 if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX) 72 return perf_tool_event__tool_names[ev]; 73 74 return NULL; 75 } 76 77 enum perf_tool_event perf_tool_event__from_str(const char *str) 78 { 79 int i; 80 81 perf_tool_event__for_each_event(i) { 82 if (!strcmp(str, perf_tool_event__tool_names[i])) 83 return i; 84 } 85 return PERF_TOOL_NONE; 86 } 87 88 89 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused) 90 { 91 return 0; 92 } 93 94 void __weak test_attr__ready(void) { } 95 96 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused) 97 { 98 } 99 100 static struct { 101 size_t size; 102 int (*init)(struct evsel *evsel); 103 void (*fini)(struct evsel *evsel); 104 } perf_evsel__object = { 105 .size = sizeof(struct evsel), 106 .init = evsel__no_extra_init, 107 .fini = evsel__no_extra_fini, 108 }; 109 110 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel), 111 void (*fini)(struct evsel *evsel)) 112 { 113 114 if (object_size == 0) 115 goto set_methods; 116 117 if (perf_evsel__object.size > object_size) 118 return -EINVAL; 119 120 perf_evsel__object.size = object_size; 121 122 set_methods: 123 if (init != NULL) 124 perf_evsel__object.init = init; 125 126 if (fini != NULL) 127 perf_evsel__object.fini = fini; 128 129 return 0; 130 } 131 132 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 133 134 int __evsel__sample_size(u64 sample_type) 135 { 136 u64 mask = sample_type & PERF_SAMPLE_MASK; 137 int size = 0; 138 int i; 139 140 for (i = 0; i < 64; i++) { 141 if (mask & (1ULL << i)) 142 size++; 143 } 144 145 size *= sizeof(u64); 146 147 return size; 148 } 149 150 /** 151 * __perf_evsel__calc_id_pos - calculate id_pos. 152 * @sample_type: sample type 153 * 154 * This function returns the position of the event id (PERF_SAMPLE_ID or 155 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct 156 * perf_record_sample. 157 */ 158 static int __perf_evsel__calc_id_pos(u64 sample_type) 159 { 160 int idx = 0; 161 162 if (sample_type & PERF_SAMPLE_IDENTIFIER) 163 return 0; 164 165 if (!(sample_type & PERF_SAMPLE_ID)) 166 return -1; 167 168 if (sample_type & PERF_SAMPLE_IP) 169 idx += 1; 170 171 if (sample_type & PERF_SAMPLE_TID) 172 idx += 1; 173 174 if (sample_type & PERF_SAMPLE_TIME) 175 idx += 1; 176 177 if (sample_type & PERF_SAMPLE_ADDR) 178 idx += 1; 179 180 return idx; 181 } 182 183 /** 184 * __perf_evsel__calc_is_pos - calculate is_pos. 185 * @sample_type: sample type 186 * 187 * This function returns the position (counting backwards) of the event id 188 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if 189 * sample_id_all is used there is an id sample appended to non-sample events. 190 */ 191 static int __perf_evsel__calc_is_pos(u64 sample_type) 192 { 193 int idx = 1; 194 195 if (sample_type & PERF_SAMPLE_IDENTIFIER) 196 return 1; 197 198 if (!(sample_type & PERF_SAMPLE_ID)) 199 return -1; 200 201 if (sample_type & PERF_SAMPLE_CPU) 202 idx += 1; 203 204 if (sample_type & PERF_SAMPLE_STREAM_ID) 205 idx += 1; 206 207 return idx; 208 } 209 210 void evsel__calc_id_pos(struct evsel *evsel) 211 { 212 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); 213 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); 214 } 215 216 void __evsel__set_sample_bit(struct evsel *evsel, 217 enum perf_event_sample_format bit) 218 { 219 if (!(evsel->core.attr.sample_type & bit)) { 220 evsel->core.attr.sample_type |= bit; 221 evsel->sample_size += sizeof(u64); 222 evsel__calc_id_pos(evsel); 223 } 224 } 225 226 void __evsel__reset_sample_bit(struct evsel *evsel, 227 enum perf_event_sample_format bit) 228 { 229 if (evsel->core.attr.sample_type & bit) { 230 evsel->core.attr.sample_type &= ~bit; 231 evsel->sample_size -= sizeof(u64); 232 evsel__calc_id_pos(evsel); 233 } 234 } 235 236 void evsel__set_sample_id(struct evsel *evsel, 237 bool can_sample_identifier) 238 { 239 if (can_sample_identifier) { 240 evsel__reset_sample_bit(evsel, ID); 241 evsel__set_sample_bit(evsel, IDENTIFIER); 242 } else { 243 evsel__set_sample_bit(evsel, ID); 244 } 245 evsel->core.attr.read_format |= PERF_FORMAT_ID; 246 } 247 248 /** 249 * evsel__is_function_event - Return whether given evsel is a function 250 * trace event 251 * 252 * @evsel - evsel selector to be tested 253 * 254 * Return %true if event is function trace event 255 */ 256 bool evsel__is_function_event(struct evsel *evsel) 257 { 258 #define FUNCTION_EVENT "ftrace:function" 259 260 return evsel->name && 261 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); 262 263 #undef FUNCTION_EVENT 264 } 265 266 void evsel__init(struct evsel *evsel, 267 struct perf_event_attr *attr, int idx) 268 { 269 perf_evsel__init(&evsel->core, attr, idx); 270 evsel->tracking = !idx; 271 evsel->unit = strdup(""); 272 evsel->scale = 1.0; 273 evsel->max_events = ULONG_MAX; 274 evsel->evlist = NULL; 275 evsel->bpf_obj = NULL; 276 evsel->bpf_fd = -1; 277 INIT_LIST_HEAD(&evsel->config_terms); 278 INIT_LIST_HEAD(&evsel->bpf_counter_list); 279 perf_evsel__object.init(evsel); 280 evsel->sample_size = __evsel__sample_size(attr->sample_type); 281 evsel__calc_id_pos(evsel); 282 evsel->cmdline_group_boundary = false; 283 evsel->metric_expr = NULL; 284 evsel->metric_name = NULL; 285 evsel->metric_events = NULL; 286 evsel->per_pkg_mask = NULL; 287 evsel->collect_stat = false; 288 evsel->pmu_name = NULL; 289 } 290 291 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) 292 { 293 struct evsel *evsel = zalloc(perf_evsel__object.size); 294 295 if (!evsel) 296 return NULL; 297 evsel__init(evsel, attr, idx); 298 299 if (evsel__is_bpf_output(evsel)) { 300 evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 301 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 302 evsel->core.attr.sample_period = 1; 303 } 304 305 if (evsel__is_clock(evsel)) { 306 free((char *)evsel->unit); 307 evsel->unit = strdup("msec"); 308 evsel->scale = 1e-6; 309 } 310 311 return evsel; 312 } 313 314 static bool perf_event_can_profile_kernel(void) 315 { 316 return perf_event_paranoid_check(1); 317 } 318 319 struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config) 320 { 321 struct perf_event_attr attr = { 322 .type = type, 323 .config = config, 324 .exclude_kernel = !perf_event_can_profile_kernel(), 325 }; 326 struct evsel *evsel; 327 328 event_attr_init(&attr); 329 330 /* 331 * Now let the usual logic to set up the perf_event_attr defaults 332 * to kick in when we return and before perf_evsel__open() is called. 333 */ 334 evsel = evsel__new(&attr); 335 if (evsel == NULL) 336 goto out; 337 338 arch_evsel__fixup_new_cycles(&evsel->core.attr); 339 340 evsel->precise_max = true; 341 342 /* use asprintf() because free(evsel) assumes name is allocated */ 343 if (asprintf(&evsel->name, "cycles%s%s%.*s", 344 (attr.precise_ip || attr.exclude_kernel) ? ":" : "", 345 attr.exclude_kernel ? "u" : "", 346 attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0) 347 goto error_free; 348 out: 349 return evsel; 350 error_free: 351 evsel__delete(evsel); 352 evsel = NULL; 353 goto out; 354 } 355 356 int copy_config_terms(struct list_head *dst, struct list_head *src) 357 { 358 struct evsel_config_term *pos, *tmp; 359 360 list_for_each_entry(pos, src, list) { 361 tmp = malloc(sizeof(*tmp)); 362 if (tmp == NULL) 363 return -ENOMEM; 364 365 *tmp = *pos; 366 if (tmp->free_str) { 367 tmp->val.str = strdup(pos->val.str); 368 if (tmp->val.str == NULL) { 369 free(tmp); 370 return -ENOMEM; 371 } 372 } 373 list_add_tail(&tmp->list, dst); 374 } 375 return 0; 376 } 377 378 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) 379 { 380 return copy_config_terms(&dst->config_terms, &src->config_terms); 381 } 382 383 /** 384 * evsel__clone - create a new evsel copied from @orig 385 * @orig: original evsel 386 * 387 * The assumption is that @orig is not configured nor opened yet. 388 * So we only care about the attributes that can be set while it's parsed. 389 */ 390 struct evsel *evsel__clone(struct evsel *orig) 391 { 392 struct evsel *evsel; 393 394 BUG_ON(orig->core.fd); 395 BUG_ON(orig->counts); 396 BUG_ON(orig->priv); 397 BUG_ON(orig->per_pkg_mask); 398 399 /* cannot handle BPF objects for now */ 400 if (orig->bpf_obj) 401 return NULL; 402 403 evsel = evsel__new(&orig->core.attr); 404 if (evsel == NULL) 405 return NULL; 406 407 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); 408 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); 409 evsel->core.threads = perf_thread_map__get(orig->core.threads); 410 evsel->core.nr_members = orig->core.nr_members; 411 evsel->core.system_wide = orig->core.system_wide; 412 413 if (orig->name) { 414 evsel->name = strdup(orig->name); 415 if (evsel->name == NULL) 416 goto out_err; 417 } 418 if (orig->group_name) { 419 evsel->group_name = strdup(orig->group_name); 420 if (evsel->group_name == NULL) 421 goto out_err; 422 } 423 if (orig->pmu_name) { 424 evsel->pmu_name = strdup(orig->pmu_name); 425 if (evsel->pmu_name == NULL) 426 goto out_err; 427 } 428 if (orig->filter) { 429 evsel->filter = strdup(orig->filter); 430 if (evsel->filter == NULL) 431 goto out_err; 432 } 433 if (orig->metric_id) { 434 evsel->metric_id = strdup(orig->metric_id); 435 if (evsel->metric_id == NULL) 436 goto out_err; 437 } 438 evsel->cgrp = cgroup__get(orig->cgrp); 439 evsel->tp_format = orig->tp_format; 440 evsel->handler = orig->handler; 441 evsel->core.leader = orig->core.leader; 442 443 evsel->max_events = orig->max_events; 444 evsel->tool_event = orig->tool_event; 445 free((char *)evsel->unit); 446 evsel->unit = strdup(orig->unit); 447 if (evsel->unit == NULL) 448 goto out_err; 449 450 evsel->scale = orig->scale; 451 evsel->snapshot = orig->snapshot; 452 evsel->per_pkg = orig->per_pkg; 453 evsel->percore = orig->percore; 454 evsel->precise_max = orig->precise_max; 455 evsel->use_uncore_alias = orig->use_uncore_alias; 456 evsel->is_libpfm_event = orig->is_libpfm_event; 457 458 evsel->exclude_GH = orig->exclude_GH; 459 evsel->sample_read = orig->sample_read; 460 evsel->auto_merge_stats = orig->auto_merge_stats; 461 evsel->collect_stat = orig->collect_stat; 462 evsel->weak_group = orig->weak_group; 463 evsel->use_config_name = orig->use_config_name; 464 465 if (evsel__copy_config_terms(evsel, orig) < 0) 466 goto out_err; 467 468 return evsel; 469 470 out_err: 471 evsel__delete(evsel); 472 return NULL; 473 } 474 475 /* 476 * Returns pointer with encoded error via <linux/err.h> interface. 477 */ 478 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) 479 { 480 struct evsel *evsel = zalloc(perf_evsel__object.size); 481 int err = -ENOMEM; 482 483 if (evsel == NULL) { 484 goto out_err; 485 } else { 486 struct perf_event_attr attr = { 487 .type = PERF_TYPE_TRACEPOINT, 488 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 489 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 490 }; 491 492 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) 493 goto out_free; 494 495 evsel->tp_format = trace_event__tp_format(sys, name); 496 if (IS_ERR(evsel->tp_format)) { 497 err = PTR_ERR(evsel->tp_format); 498 goto out_free; 499 } 500 501 event_attr_init(&attr); 502 attr.config = evsel->tp_format->id; 503 attr.sample_period = 1; 504 evsel__init(evsel, &attr, idx); 505 } 506 507 return evsel; 508 509 out_free: 510 zfree(&evsel->name); 511 free(evsel); 512 out_err: 513 return ERR_PTR(err); 514 } 515 516 const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = { 517 "cycles", 518 "instructions", 519 "cache-references", 520 "cache-misses", 521 "branches", 522 "branch-misses", 523 "bus-cycles", 524 "stalled-cycles-frontend", 525 "stalled-cycles-backend", 526 "ref-cycles", 527 }; 528 529 char *evsel__bpf_counter_events; 530 531 bool evsel__match_bpf_counter_events(const char *name) 532 { 533 int name_len; 534 bool match; 535 char *ptr; 536 537 if (!evsel__bpf_counter_events) 538 return false; 539 540 ptr = strstr(evsel__bpf_counter_events, name); 541 name_len = strlen(name); 542 543 /* check name matches a full token in evsel__bpf_counter_events */ 544 match = (ptr != NULL) && 545 ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) && 546 ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0')); 547 548 return match; 549 } 550 551 static const char *__evsel__hw_name(u64 config) 552 { 553 if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config]) 554 return evsel__hw_names[config]; 555 556 return "unknown-hardware"; 557 } 558 559 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size) 560 { 561 int colon = 0, r = 0; 562 struct perf_event_attr *attr = &evsel->core.attr; 563 bool exclude_guest_default = false; 564 565 #define MOD_PRINT(context, mod) do { \ 566 if (!attr->exclude_##context) { \ 567 if (!colon) colon = ++r; \ 568 r += scnprintf(bf + r, size - r, "%c", mod); \ 569 } } while(0) 570 571 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) { 572 MOD_PRINT(kernel, 'k'); 573 MOD_PRINT(user, 'u'); 574 MOD_PRINT(hv, 'h'); 575 exclude_guest_default = true; 576 } 577 578 if (attr->precise_ip) { 579 if (!colon) 580 colon = ++r; 581 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); 582 exclude_guest_default = true; 583 } 584 585 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) { 586 MOD_PRINT(host, 'H'); 587 MOD_PRINT(guest, 'G'); 588 } 589 #undef MOD_PRINT 590 if (colon) 591 bf[colon - 1] = ':'; 592 return r; 593 } 594 595 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size) 596 { 597 int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); 598 return r + evsel__add_modifiers(evsel, bf + r, size - r); 599 } 600 601 const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = { 602 "cpu-clock", 603 "task-clock", 604 "page-faults", 605 "context-switches", 606 "cpu-migrations", 607 "minor-faults", 608 "major-faults", 609 "alignment-faults", 610 "emulation-faults", 611 "dummy", 612 }; 613 614 static const char *__evsel__sw_name(u64 config) 615 { 616 if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config]) 617 return evsel__sw_names[config]; 618 return "unknown-software"; 619 } 620 621 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size) 622 { 623 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); 624 return r + evsel__add_modifiers(evsel, bf + r, size - r); 625 } 626 627 static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size) 628 { 629 return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev)); 630 } 631 632 static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) 633 { 634 int r; 635 636 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); 637 638 if (type & HW_BREAKPOINT_R) 639 r += scnprintf(bf + r, size - r, "r"); 640 641 if (type & HW_BREAKPOINT_W) 642 r += scnprintf(bf + r, size - r, "w"); 643 644 if (type & HW_BREAKPOINT_X) 645 r += scnprintf(bf + r, size - r, "x"); 646 647 return r; 648 } 649 650 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size) 651 { 652 struct perf_event_attr *attr = &evsel->core.attr; 653 int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); 654 return r + evsel__add_modifiers(evsel, bf + r, size - r); 655 } 656 657 const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = { 658 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 659 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 660 { "LLC", "L2", }, 661 { "dTLB", "d-tlb", "Data-TLB", }, 662 { "iTLB", "i-tlb", "Instruction-TLB", }, 663 { "branch", "branches", "bpu", "btb", "bpc", }, 664 { "node", }, 665 }; 666 667 const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = { 668 { "load", "loads", "read", }, 669 { "store", "stores", "write", }, 670 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 671 }; 672 673 const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = { 674 { "refs", "Reference", "ops", "access", }, 675 { "misses", "miss", }, 676 }; 677 678 #define C(x) PERF_COUNT_HW_CACHE_##x 679 #define CACHE_READ (1 << C(OP_READ)) 680 #define CACHE_WRITE (1 << C(OP_WRITE)) 681 #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) 682 #define COP(x) (1 << x) 683 684 /* 685 * cache operation stat 686 * L1I : Read and prefetch only 687 * ITLB and BPU : Read-only 688 */ 689 static const unsigned long evsel__hw_cache_stat[C(MAX)] = { 690 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 691 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), 692 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 693 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 694 [C(ITLB)] = (CACHE_READ), 695 [C(BPU)] = (CACHE_READ), 696 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 697 }; 698 699 bool evsel__is_cache_op_valid(u8 type, u8 op) 700 { 701 if (evsel__hw_cache_stat[type] & COP(op)) 702 return true; /* valid */ 703 else 704 return false; /* invalid */ 705 } 706 707 int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size) 708 { 709 if (result) { 710 return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0], 711 evsel__hw_cache_op[op][0], 712 evsel__hw_cache_result[result][0]); 713 } 714 715 return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0], 716 evsel__hw_cache_op[op][1]); 717 } 718 719 static int __evsel__hw_cache_name(u64 config, char *bf, size_t size) 720 { 721 u8 op, result, type = (config >> 0) & 0xff; 722 const char *err = "unknown-ext-hardware-cache-type"; 723 724 if (type >= PERF_COUNT_HW_CACHE_MAX) 725 goto out_err; 726 727 op = (config >> 8) & 0xff; 728 err = "unknown-ext-hardware-cache-op"; 729 if (op >= PERF_COUNT_HW_CACHE_OP_MAX) 730 goto out_err; 731 732 result = (config >> 16) & 0xff; 733 err = "unknown-ext-hardware-cache-result"; 734 if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 735 goto out_err; 736 737 err = "invalid-cache"; 738 if (!evsel__is_cache_op_valid(type, op)) 739 goto out_err; 740 741 return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size); 742 out_err: 743 return scnprintf(bf, size, "%s", err); 744 } 745 746 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size) 747 { 748 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); 749 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 750 } 751 752 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size) 753 { 754 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); 755 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 756 } 757 758 const char *evsel__name(struct evsel *evsel) 759 { 760 char bf[128]; 761 762 if (!evsel) 763 goto out_unknown; 764 765 if (evsel->name) 766 return evsel->name; 767 768 switch (evsel->core.attr.type) { 769 case PERF_TYPE_RAW: 770 evsel__raw_name(evsel, bf, sizeof(bf)); 771 break; 772 773 case PERF_TYPE_HARDWARE: 774 evsel__hw_name(evsel, bf, sizeof(bf)); 775 break; 776 777 case PERF_TYPE_HW_CACHE: 778 evsel__hw_cache_name(evsel, bf, sizeof(bf)); 779 break; 780 781 case PERF_TYPE_SOFTWARE: 782 if (evsel__is_tool(evsel)) 783 evsel__tool_name(evsel->tool_event, bf, sizeof(bf)); 784 else 785 evsel__sw_name(evsel, bf, sizeof(bf)); 786 break; 787 788 case PERF_TYPE_TRACEPOINT: 789 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); 790 break; 791 792 case PERF_TYPE_BREAKPOINT: 793 evsel__bp_name(evsel, bf, sizeof(bf)); 794 break; 795 796 default: 797 scnprintf(bf, sizeof(bf), "unknown attr type: %d", 798 evsel->core.attr.type); 799 break; 800 } 801 802 evsel->name = strdup(bf); 803 804 if (evsel->name) 805 return evsel->name; 806 out_unknown: 807 return "unknown"; 808 } 809 810 const char *evsel__metric_id(const struct evsel *evsel) 811 { 812 if (evsel->metric_id) 813 return evsel->metric_id; 814 815 if (evsel__is_tool(evsel)) 816 return perf_tool_event__to_str(evsel->tool_event); 817 818 return "unknown"; 819 } 820 821 const char *evsel__group_name(struct evsel *evsel) 822 { 823 return evsel->group_name ?: "anon group"; 824 } 825 826 /* 827 * Returns the group details for the specified leader, 828 * with following rules. 829 * 830 * For record -e '{cycles,instructions}' 831 * 'anon group { cycles:u, instructions:u }' 832 * 833 * For record -e 'cycles,instructions' and report --group 834 * 'cycles:u, instructions:u' 835 */ 836 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size) 837 { 838 int ret = 0; 839 struct evsel *pos; 840 const char *group_name = evsel__group_name(evsel); 841 842 if (!evsel->forced_leader) 843 ret = scnprintf(buf, size, "%s { ", group_name); 844 845 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel)); 846 847 for_each_group_member(pos, evsel) 848 ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos)); 849 850 if (!evsel->forced_leader) 851 ret += scnprintf(buf + ret, size - ret, " }"); 852 853 return ret; 854 } 855 856 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 857 struct callchain_param *param) 858 { 859 bool function = evsel__is_function_event(evsel); 860 struct perf_event_attr *attr = &evsel->core.attr; 861 862 evsel__set_sample_bit(evsel, CALLCHAIN); 863 864 attr->sample_max_stack = param->max_stack; 865 866 if (opts->kernel_callchains) 867 attr->exclude_callchain_user = 1; 868 if (opts->user_callchains) 869 attr->exclude_callchain_kernel = 1; 870 if (param->record_mode == CALLCHAIN_LBR) { 871 if (!opts->branch_stack) { 872 if (attr->exclude_user) { 873 pr_warning("LBR callstack option is only available " 874 "to get user callchain information. " 875 "Falling back to framepointers.\n"); 876 } else { 877 evsel__set_sample_bit(evsel, BRANCH_STACK); 878 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER | 879 PERF_SAMPLE_BRANCH_CALL_STACK | 880 PERF_SAMPLE_BRANCH_NO_CYCLES | 881 PERF_SAMPLE_BRANCH_NO_FLAGS | 882 PERF_SAMPLE_BRANCH_HW_INDEX; 883 } 884 } else 885 pr_warning("Cannot use LBR callstack with branch stack. " 886 "Falling back to framepointers.\n"); 887 } 888 889 if (param->record_mode == CALLCHAIN_DWARF) { 890 if (!function) { 891 evsel__set_sample_bit(evsel, REGS_USER); 892 evsel__set_sample_bit(evsel, STACK_USER); 893 if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) { 894 attr->sample_regs_user |= DWARF_MINIMAL_REGS; 895 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, " 896 "specifying a subset with --user-regs may render DWARF unwinding unreliable, " 897 "so the minimal registers set (IP, SP) is explicitly forced.\n"); 898 } else { 899 attr->sample_regs_user |= PERF_REGS_MASK; 900 } 901 attr->sample_stack_user = param->dump_size; 902 attr->exclude_callchain_user = 1; 903 } else { 904 pr_info("Cannot use DWARF unwind for function trace event," 905 " falling back to framepointers.\n"); 906 } 907 } 908 909 if (function) { 910 pr_info("Disabling user space callchains for function trace event.\n"); 911 attr->exclude_callchain_user = 1; 912 } 913 } 914 915 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 916 struct callchain_param *param) 917 { 918 if (param->enabled) 919 return __evsel__config_callchain(evsel, opts, param); 920 } 921 922 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param) 923 { 924 struct perf_event_attr *attr = &evsel->core.attr; 925 926 evsel__reset_sample_bit(evsel, CALLCHAIN); 927 if (param->record_mode == CALLCHAIN_LBR) { 928 evsel__reset_sample_bit(evsel, BRANCH_STACK); 929 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER | 930 PERF_SAMPLE_BRANCH_CALL_STACK | 931 PERF_SAMPLE_BRANCH_HW_INDEX); 932 } 933 if (param->record_mode == CALLCHAIN_DWARF) { 934 evsel__reset_sample_bit(evsel, REGS_USER); 935 evsel__reset_sample_bit(evsel, STACK_USER); 936 } 937 } 938 939 static void evsel__apply_config_terms(struct evsel *evsel, 940 struct record_opts *opts, bool track) 941 { 942 struct evsel_config_term *term; 943 struct list_head *config_terms = &evsel->config_terms; 944 struct perf_event_attr *attr = &evsel->core.attr; 945 /* callgraph default */ 946 struct callchain_param param = { 947 .record_mode = callchain_param.record_mode, 948 }; 949 u32 dump_size = 0; 950 int max_stack = 0; 951 const char *callgraph_buf = NULL; 952 953 list_for_each_entry(term, config_terms, list) { 954 switch (term->type) { 955 case EVSEL__CONFIG_TERM_PERIOD: 956 if (!(term->weak && opts->user_interval != ULLONG_MAX)) { 957 attr->sample_period = term->val.period; 958 attr->freq = 0; 959 evsel__reset_sample_bit(evsel, PERIOD); 960 } 961 break; 962 case EVSEL__CONFIG_TERM_FREQ: 963 if (!(term->weak && opts->user_freq != UINT_MAX)) { 964 attr->sample_freq = term->val.freq; 965 attr->freq = 1; 966 evsel__set_sample_bit(evsel, PERIOD); 967 } 968 break; 969 case EVSEL__CONFIG_TERM_TIME: 970 if (term->val.time) 971 evsel__set_sample_bit(evsel, TIME); 972 else 973 evsel__reset_sample_bit(evsel, TIME); 974 break; 975 case EVSEL__CONFIG_TERM_CALLGRAPH: 976 callgraph_buf = term->val.str; 977 break; 978 case EVSEL__CONFIG_TERM_BRANCH: 979 if (term->val.str && strcmp(term->val.str, "no")) { 980 evsel__set_sample_bit(evsel, BRANCH_STACK); 981 parse_branch_str(term->val.str, 982 &attr->branch_sample_type); 983 } else 984 evsel__reset_sample_bit(evsel, BRANCH_STACK); 985 break; 986 case EVSEL__CONFIG_TERM_STACK_USER: 987 dump_size = term->val.stack_user; 988 break; 989 case EVSEL__CONFIG_TERM_MAX_STACK: 990 max_stack = term->val.max_stack; 991 break; 992 case EVSEL__CONFIG_TERM_MAX_EVENTS: 993 evsel->max_events = term->val.max_events; 994 break; 995 case EVSEL__CONFIG_TERM_INHERIT: 996 /* 997 * attr->inherit should has already been set by 998 * evsel__config. If user explicitly set 999 * inherit using config terms, override global 1000 * opt->no_inherit setting. 1001 */ 1002 attr->inherit = term->val.inherit ? 1 : 0; 1003 break; 1004 case EVSEL__CONFIG_TERM_OVERWRITE: 1005 attr->write_backward = term->val.overwrite ? 1 : 0; 1006 break; 1007 case EVSEL__CONFIG_TERM_DRV_CFG: 1008 break; 1009 case EVSEL__CONFIG_TERM_PERCORE: 1010 break; 1011 case EVSEL__CONFIG_TERM_AUX_OUTPUT: 1012 attr->aux_output = term->val.aux_output ? 1 : 0; 1013 break; 1014 case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE: 1015 /* Already applied by auxtrace */ 1016 break; 1017 case EVSEL__CONFIG_TERM_CFG_CHG: 1018 break; 1019 default: 1020 break; 1021 } 1022 } 1023 1024 /* User explicitly set per-event callgraph, clear the old setting and reset. */ 1025 if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) { 1026 bool sample_address = false; 1027 1028 if (max_stack) { 1029 param.max_stack = max_stack; 1030 if (callgraph_buf == NULL) 1031 callgraph_buf = "fp"; 1032 } 1033 1034 /* parse callgraph parameters */ 1035 if (callgraph_buf != NULL) { 1036 if (!strcmp(callgraph_buf, "no")) { 1037 param.enabled = false; 1038 param.record_mode = CALLCHAIN_NONE; 1039 } else { 1040 param.enabled = true; 1041 if (parse_callchain_record(callgraph_buf, ¶m)) { 1042 pr_err("per-event callgraph setting for %s failed. " 1043 "Apply callgraph global setting for it\n", 1044 evsel->name); 1045 return; 1046 } 1047 if (param.record_mode == CALLCHAIN_DWARF) 1048 sample_address = true; 1049 } 1050 } 1051 if (dump_size > 0) { 1052 dump_size = round_up(dump_size, sizeof(u64)); 1053 param.dump_size = dump_size; 1054 } 1055 1056 /* If global callgraph set, clear it */ 1057 if (callchain_param.enabled) 1058 evsel__reset_callgraph(evsel, &callchain_param); 1059 1060 /* set perf-event callgraph */ 1061 if (param.enabled) { 1062 if (sample_address) { 1063 evsel__set_sample_bit(evsel, ADDR); 1064 evsel__set_sample_bit(evsel, DATA_SRC); 1065 evsel->core.attr.mmap_data = track; 1066 } 1067 evsel__config_callchain(evsel, opts, ¶m); 1068 } 1069 } 1070 } 1071 1072 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) 1073 { 1074 struct evsel_config_term *term, *found_term = NULL; 1075 1076 list_for_each_entry(term, &evsel->config_terms, list) { 1077 if (term->type == type) 1078 found_term = term; 1079 } 1080 1081 return found_term; 1082 } 1083 1084 void __weak arch_evsel__set_sample_weight(struct evsel *evsel) 1085 { 1086 evsel__set_sample_bit(evsel, WEIGHT); 1087 } 1088 1089 void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused) 1090 { 1091 } 1092 1093 static void evsel__set_default_freq_period(struct record_opts *opts, 1094 struct perf_event_attr *attr) 1095 { 1096 if (opts->freq) { 1097 attr->freq = 1; 1098 attr->sample_freq = opts->freq; 1099 } else { 1100 attr->sample_period = opts->default_interval; 1101 } 1102 } 1103 1104 /* 1105 * The enable_on_exec/disabled value strategy: 1106 * 1107 * 1) For any type of traced program: 1108 * - all independent events and group leaders are disabled 1109 * - all group members are enabled 1110 * 1111 * Group members are ruled by group leaders. They need to 1112 * be enabled, because the group scheduling relies on that. 1113 * 1114 * 2) For traced programs executed by perf: 1115 * - all independent events and group leaders have 1116 * enable_on_exec set 1117 * - we don't specifically enable or disable any event during 1118 * the record command 1119 * 1120 * Independent events and group leaders are initially disabled 1121 * and get enabled by exec. Group members are ruled by group 1122 * leaders as stated in 1). 1123 * 1124 * 3) For traced programs attached by perf (pid/tid): 1125 * - we specifically enable or disable all events during 1126 * the record command 1127 * 1128 * When attaching events to already running traced we 1129 * enable/disable events specifically, as there's no 1130 * initial traced exec call. 1131 */ 1132 void evsel__config(struct evsel *evsel, struct record_opts *opts, 1133 struct callchain_param *callchain) 1134 { 1135 struct evsel *leader = evsel__leader(evsel); 1136 struct perf_event_attr *attr = &evsel->core.attr; 1137 int track = evsel->tracking; 1138 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; 1139 1140 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; 1141 attr->inherit = !opts->no_inherit; 1142 attr->write_backward = opts->overwrite ? 1 : 0; 1143 1144 evsel__set_sample_bit(evsel, IP); 1145 evsel__set_sample_bit(evsel, TID); 1146 1147 if (evsel->sample_read) { 1148 evsel__set_sample_bit(evsel, READ); 1149 1150 /* 1151 * We need ID even in case of single event, because 1152 * PERF_SAMPLE_READ process ID specific data. 1153 */ 1154 evsel__set_sample_id(evsel, false); 1155 1156 /* 1157 * Apply group format only if we belong to group 1158 * with more than one members. 1159 */ 1160 if (leader->core.nr_members > 1) { 1161 attr->read_format |= PERF_FORMAT_GROUP; 1162 attr->inherit = 0; 1163 } 1164 } 1165 1166 /* 1167 * We default some events to have a default interval. But keep 1168 * it a weak assumption overridable by the user. 1169 */ 1170 if ((evsel->is_libpfm_event && !attr->sample_period) || 1171 (!evsel->is_libpfm_event && (!attr->sample_period || 1172 opts->user_freq != UINT_MAX || 1173 opts->user_interval != ULLONG_MAX))) 1174 evsel__set_default_freq_period(opts, attr); 1175 1176 /* 1177 * If attr->freq was set (here or earlier), ask for period 1178 * to be sampled. 1179 */ 1180 if (attr->freq) 1181 evsel__set_sample_bit(evsel, PERIOD); 1182 1183 if (opts->no_samples) 1184 attr->sample_freq = 0; 1185 1186 if (opts->inherit_stat) { 1187 evsel->core.attr.read_format |= 1188 PERF_FORMAT_TOTAL_TIME_ENABLED | 1189 PERF_FORMAT_TOTAL_TIME_RUNNING | 1190 PERF_FORMAT_ID; 1191 attr->inherit_stat = 1; 1192 } 1193 1194 if (opts->sample_address) { 1195 evsel__set_sample_bit(evsel, ADDR); 1196 attr->mmap_data = track; 1197 } 1198 1199 /* 1200 * We don't allow user space callchains for function trace 1201 * event, due to issues with page faults while tracing page 1202 * fault handler and its overall trickiness nature. 1203 */ 1204 if (evsel__is_function_event(evsel)) 1205 evsel->core.attr.exclude_callchain_user = 1; 1206 1207 if (callchain && callchain->enabled && !evsel->no_aux_samples) 1208 evsel__config_callchain(evsel, opts, callchain); 1209 1210 if (opts->sample_intr_regs && !evsel->no_aux_samples && 1211 !evsel__is_dummy_event(evsel)) { 1212 attr->sample_regs_intr = opts->sample_intr_regs; 1213 evsel__set_sample_bit(evsel, REGS_INTR); 1214 } 1215 1216 if (opts->sample_user_regs && !evsel->no_aux_samples && 1217 !evsel__is_dummy_event(evsel)) { 1218 attr->sample_regs_user |= opts->sample_user_regs; 1219 evsel__set_sample_bit(evsel, REGS_USER); 1220 } 1221 1222 if (target__has_cpu(&opts->target) || opts->sample_cpu) 1223 evsel__set_sample_bit(evsel, CPU); 1224 1225 /* 1226 * When the user explicitly disabled time don't force it here. 1227 */ 1228 if (opts->sample_time && 1229 (!perf_missing_features.sample_id_all && 1230 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu || 1231 opts->sample_time_set))) 1232 evsel__set_sample_bit(evsel, TIME); 1233 1234 if (opts->raw_samples && !evsel->no_aux_samples) { 1235 evsel__set_sample_bit(evsel, TIME); 1236 evsel__set_sample_bit(evsel, RAW); 1237 evsel__set_sample_bit(evsel, CPU); 1238 } 1239 1240 if (opts->sample_address) 1241 evsel__set_sample_bit(evsel, DATA_SRC); 1242 1243 if (opts->sample_phys_addr) 1244 evsel__set_sample_bit(evsel, PHYS_ADDR); 1245 1246 if (opts->no_buffering) { 1247 attr->watermark = 0; 1248 attr->wakeup_events = 1; 1249 } 1250 if (opts->branch_stack && !evsel->no_aux_samples) { 1251 evsel__set_sample_bit(evsel, BRANCH_STACK); 1252 attr->branch_sample_type = opts->branch_stack; 1253 } 1254 1255 if (opts->sample_weight) 1256 arch_evsel__set_sample_weight(evsel); 1257 1258 attr->task = track; 1259 attr->mmap = track; 1260 attr->mmap2 = track && !perf_missing_features.mmap2; 1261 attr->comm = track; 1262 attr->build_id = track && opts->build_id; 1263 1264 /* 1265 * ksymbol is tracked separately with text poke because it needs to be 1266 * system wide and enabled immediately. 1267 */ 1268 if (!opts->text_poke) 1269 attr->ksymbol = track && !perf_missing_features.ksymbol; 1270 attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf; 1271 1272 if (opts->record_namespaces) 1273 attr->namespaces = track; 1274 1275 if (opts->record_cgroup) { 1276 attr->cgroup = track && !perf_missing_features.cgroup; 1277 evsel__set_sample_bit(evsel, CGROUP); 1278 } 1279 1280 if (opts->sample_data_page_size) 1281 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE); 1282 1283 if (opts->sample_code_page_size) 1284 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE); 1285 1286 if (opts->record_switch_events) 1287 attr->context_switch = track; 1288 1289 if (opts->sample_transaction) 1290 evsel__set_sample_bit(evsel, TRANSACTION); 1291 1292 if (opts->running_time) { 1293 evsel->core.attr.read_format |= 1294 PERF_FORMAT_TOTAL_TIME_ENABLED | 1295 PERF_FORMAT_TOTAL_TIME_RUNNING; 1296 } 1297 1298 /* 1299 * XXX see the function comment above 1300 * 1301 * Disabling only independent events or group leaders, 1302 * keeping group members enabled. 1303 */ 1304 if (evsel__is_group_leader(evsel)) 1305 attr->disabled = 1; 1306 1307 /* 1308 * Setting enable_on_exec for independent events and 1309 * group leaders for traced executed by perf. 1310 */ 1311 if (target__none(&opts->target) && evsel__is_group_leader(evsel) && 1312 !opts->initial_delay) 1313 attr->enable_on_exec = 1; 1314 1315 if (evsel->immediate) { 1316 attr->disabled = 0; 1317 attr->enable_on_exec = 0; 1318 } 1319 1320 clockid = opts->clockid; 1321 if (opts->use_clockid) { 1322 attr->use_clockid = 1; 1323 attr->clockid = opts->clockid; 1324 } 1325 1326 if (evsel->precise_max) 1327 attr->precise_ip = 3; 1328 1329 if (opts->all_user) { 1330 attr->exclude_kernel = 1; 1331 attr->exclude_user = 0; 1332 } 1333 1334 if (opts->all_kernel) { 1335 attr->exclude_kernel = 0; 1336 attr->exclude_user = 1; 1337 } 1338 1339 if (evsel->core.own_cpus || evsel->unit) 1340 evsel->core.attr.read_format |= PERF_FORMAT_ID; 1341 1342 /* 1343 * Apply event specific term settings, 1344 * it overloads any global configuration. 1345 */ 1346 evsel__apply_config_terms(evsel, opts, track); 1347 1348 evsel->ignore_missing_thread = opts->ignore_missing_thread; 1349 1350 /* The --period option takes the precedence. */ 1351 if (opts->period_set) { 1352 if (opts->period) 1353 evsel__set_sample_bit(evsel, PERIOD); 1354 else 1355 evsel__reset_sample_bit(evsel, PERIOD); 1356 } 1357 1358 /* 1359 * A dummy event never triggers any actual counter and therefore 1360 * cannot be used with branch_stack. 1361 * 1362 * For initial_delay, a dummy event is added implicitly. 1363 * The software event will trigger -EOPNOTSUPP error out, 1364 * if BRANCH_STACK bit is set. 1365 */ 1366 if (evsel__is_dummy_event(evsel)) 1367 evsel__reset_sample_bit(evsel, BRANCH_STACK); 1368 } 1369 1370 int evsel__set_filter(struct evsel *evsel, const char *filter) 1371 { 1372 char *new_filter = strdup(filter); 1373 1374 if (new_filter != NULL) { 1375 free(evsel->filter); 1376 evsel->filter = new_filter; 1377 return 0; 1378 } 1379 1380 return -1; 1381 } 1382 1383 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter) 1384 { 1385 char *new_filter; 1386 1387 if (evsel->filter == NULL) 1388 return evsel__set_filter(evsel, filter); 1389 1390 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) { 1391 free(evsel->filter); 1392 evsel->filter = new_filter; 1393 return 0; 1394 } 1395 1396 return -1; 1397 } 1398 1399 int evsel__append_tp_filter(struct evsel *evsel, const char *filter) 1400 { 1401 return evsel__append_filter(evsel, "(%s) && (%s)", filter); 1402 } 1403 1404 int evsel__append_addr_filter(struct evsel *evsel, const char *filter) 1405 { 1406 return evsel__append_filter(evsel, "%s,%s", filter); 1407 } 1408 1409 /* Caller has to clear disabled after going through all CPUs. */ 1410 int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx) 1411 { 1412 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx); 1413 } 1414 1415 int evsel__enable(struct evsel *evsel) 1416 { 1417 int err = perf_evsel__enable(&evsel->core); 1418 1419 if (!err) 1420 evsel->disabled = false; 1421 return err; 1422 } 1423 1424 /* Caller has to set disabled after going through all CPUs. */ 1425 int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx) 1426 { 1427 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx); 1428 } 1429 1430 int evsel__disable(struct evsel *evsel) 1431 { 1432 int err = perf_evsel__disable(&evsel->core); 1433 /* 1434 * We mark it disabled here so that tools that disable a event can 1435 * ignore events after they disable it. I.e. the ring buffer may have 1436 * already a few more events queued up before the kernel got the stop 1437 * request. 1438 */ 1439 if (!err) 1440 evsel->disabled = true; 1441 1442 return err; 1443 } 1444 1445 void free_config_terms(struct list_head *config_terms) 1446 { 1447 struct evsel_config_term *term, *h; 1448 1449 list_for_each_entry_safe(term, h, config_terms, list) { 1450 list_del_init(&term->list); 1451 if (term->free_str) 1452 zfree(&term->val.str); 1453 free(term); 1454 } 1455 } 1456 1457 static void evsel__free_config_terms(struct evsel *evsel) 1458 { 1459 free_config_terms(&evsel->config_terms); 1460 } 1461 1462 void evsel__exit(struct evsel *evsel) 1463 { 1464 assert(list_empty(&evsel->core.node)); 1465 assert(evsel->evlist == NULL); 1466 bpf_counter__destroy(evsel); 1467 evsel__free_counts(evsel); 1468 perf_evsel__free_fd(&evsel->core); 1469 perf_evsel__free_id(&evsel->core); 1470 evsel__free_config_terms(evsel); 1471 cgroup__put(evsel->cgrp); 1472 perf_cpu_map__put(evsel->core.cpus); 1473 perf_cpu_map__put(evsel->core.own_cpus); 1474 perf_thread_map__put(evsel->core.threads); 1475 zfree(&evsel->group_name); 1476 zfree(&evsel->name); 1477 zfree(&evsel->pmu_name); 1478 zfree(&evsel->unit); 1479 zfree(&evsel->metric_id); 1480 evsel__zero_per_pkg(evsel); 1481 hashmap__free(evsel->per_pkg_mask); 1482 evsel->per_pkg_mask = NULL; 1483 zfree(&evsel->metric_events); 1484 perf_evsel__object.fini(evsel); 1485 } 1486 1487 void evsel__delete(struct evsel *evsel) 1488 { 1489 evsel__exit(evsel); 1490 free(evsel); 1491 } 1492 1493 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread, 1494 struct perf_counts_values *count) 1495 { 1496 struct perf_counts_values tmp; 1497 1498 if (!evsel->prev_raw_counts) 1499 return; 1500 1501 if (cpu_map_idx == -1) { 1502 tmp = evsel->prev_raw_counts->aggr; 1503 evsel->prev_raw_counts->aggr = *count; 1504 } else { 1505 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread); 1506 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count; 1507 } 1508 1509 count->val = count->val - tmp.val; 1510 count->ena = count->ena - tmp.ena; 1511 count->run = count->run - tmp.run; 1512 } 1513 1514 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread) 1515 { 1516 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread); 1517 1518 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count); 1519 } 1520 1521 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread, 1522 u64 val, u64 ena, u64 run) 1523 { 1524 struct perf_counts_values *count; 1525 1526 count = perf_counts(counter->counts, cpu_map_idx, thread); 1527 1528 count->val = val; 1529 count->ena = ena; 1530 count->run = run; 1531 1532 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true); 1533 } 1534 1535 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data) 1536 { 1537 u64 read_format = leader->core.attr.read_format; 1538 struct sample_read_value *v; 1539 u64 nr, ena = 0, run = 0, i; 1540 1541 nr = *data++; 1542 1543 if (nr != (u64) leader->core.nr_members) 1544 return -EINVAL; 1545 1546 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1547 ena = *data++; 1548 1549 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1550 run = *data++; 1551 1552 v = (struct sample_read_value *) data; 1553 1554 evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run); 1555 1556 for (i = 1; i < nr; i++) { 1557 struct evsel *counter; 1558 1559 counter = evlist__id2evsel(leader->evlist, v[i].id); 1560 if (!counter) 1561 return -EINVAL; 1562 1563 evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run); 1564 } 1565 1566 return 0; 1567 } 1568 1569 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread) 1570 { 1571 struct perf_stat_evsel *ps = leader->stats; 1572 u64 read_format = leader->core.attr.read_format; 1573 int size = perf_evsel__read_size(&leader->core); 1574 u64 *data = ps->group_data; 1575 1576 if (!(read_format & PERF_FORMAT_ID)) 1577 return -EINVAL; 1578 1579 if (!evsel__is_group_leader(leader)) 1580 return -EINVAL; 1581 1582 if (!data) { 1583 data = zalloc(size); 1584 if (!data) 1585 return -ENOMEM; 1586 1587 ps->group_data = data; 1588 } 1589 1590 if (FD(leader, cpu_map_idx, thread) < 0) 1591 return -EINVAL; 1592 1593 if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0) 1594 return -errno; 1595 1596 return evsel__process_group_data(leader, cpu_map_idx, thread, data); 1597 } 1598 1599 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread) 1600 { 1601 u64 read_format = evsel->core.attr.read_format; 1602 1603 if (read_format & PERF_FORMAT_GROUP) 1604 return evsel__read_group(evsel, cpu_map_idx, thread); 1605 1606 return evsel__read_one(evsel, cpu_map_idx, thread); 1607 } 1608 1609 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale) 1610 { 1611 struct perf_counts_values count; 1612 size_t nv = scale ? 3 : 1; 1613 1614 if (FD(evsel, cpu_map_idx, thread) < 0) 1615 return -EINVAL; 1616 1617 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0) 1618 return -ENOMEM; 1619 1620 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0) 1621 return -errno; 1622 1623 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count); 1624 perf_counts_values__scale(&count, scale, NULL); 1625 *perf_counts(evsel->counts, cpu_map_idx, thread) = count; 1626 return 0; 1627 } 1628 1629 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, 1630 int cpu_map_idx) 1631 { 1632 struct perf_cpu cpu; 1633 1634 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); 1635 return perf_cpu_map__idx(other->core.cpus, cpu); 1636 } 1637 1638 static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx) 1639 { 1640 struct evsel *leader = evsel__leader(evsel); 1641 1642 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || 1643 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { 1644 return evsel__match_other_cpu(evsel, leader, cpu_map_idx); 1645 } 1646 1647 return cpu_map_idx; 1648 } 1649 1650 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread) 1651 { 1652 struct evsel *leader = evsel__leader(evsel); 1653 int fd; 1654 1655 if (evsel__is_group_leader(evsel)) 1656 return -1; 1657 1658 /* 1659 * Leader must be already processed/open, 1660 * if not it's a bug. 1661 */ 1662 BUG_ON(!leader->core.fd); 1663 1664 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx); 1665 if (cpu_map_idx == -1) 1666 return -1; 1667 1668 fd = FD(leader, cpu_map_idx, thread); 1669 BUG_ON(fd == -1); 1670 1671 return fd; 1672 } 1673 1674 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx) 1675 { 1676 for (int cpu = 0; cpu < nr_cpus; cpu++) 1677 for (int thread = thread_idx; thread < nr_threads - 1; thread++) 1678 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1); 1679 } 1680 1681 static int update_fds(struct evsel *evsel, 1682 int nr_cpus, int cpu_map_idx, 1683 int nr_threads, int thread_idx) 1684 { 1685 struct evsel *pos; 1686 1687 if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads) 1688 return -EINVAL; 1689 1690 evlist__for_each_entry(evsel->evlist, pos) { 1691 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx; 1692 1693 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx); 1694 1695 /* 1696 * Since fds for next evsel has not been created, 1697 * there is no need to iterate whole event list. 1698 */ 1699 if (pos == evsel) 1700 break; 1701 } 1702 return 0; 1703 } 1704 1705 static bool evsel__ignore_missing_thread(struct evsel *evsel, 1706 int nr_cpus, int cpu_map_idx, 1707 struct perf_thread_map *threads, 1708 int thread, int err) 1709 { 1710 pid_t ignore_pid = perf_thread_map__pid(threads, thread); 1711 1712 if (!evsel->ignore_missing_thread) 1713 return false; 1714 1715 /* The system wide setup does not work with threads. */ 1716 if (evsel->core.system_wide) 1717 return false; 1718 1719 /* The -ESRCH is perf event syscall errno for pid's not found. */ 1720 if (err != -ESRCH) 1721 return false; 1722 1723 /* If there's only one thread, let it fail. */ 1724 if (threads->nr == 1) 1725 return false; 1726 1727 /* 1728 * We should remove fd for missing_thread first 1729 * because thread_map__remove() will decrease threads->nr. 1730 */ 1731 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread)) 1732 return false; 1733 1734 if (thread_map__remove(threads, thread)) 1735 return false; 1736 1737 pr_warning("WARNING: Ignored open failure for pid %d\n", 1738 ignore_pid); 1739 return true; 1740 } 1741 1742 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1743 void *priv __maybe_unused) 1744 { 1745 return fprintf(fp, " %-32s %s\n", name, val); 1746 } 1747 1748 static void display_attr(struct perf_event_attr *attr) 1749 { 1750 if (verbose >= 2 || debug_peo_args) { 1751 fprintf(stderr, "%.60s\n", graph_dotted_line); 1752 fprintf(stderr, "perf_event_attr:\n"); 1753 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL); 1754 fprintf(stderr, "%.60s\n", graph_dotted_line); 1755 } 1756 } 1757 1758 bool evsel__precise_ip_fallback(struct evsel *evsel) 1759 { 1760 /* Do not try less precise if not requested. */ 1761 if (!evsel->precise_max) 1762 return false; 1763 1764 /* 1765 * We tried all the precise_ip values, and it's 1766 * still failing, so leave it to standard fallback. 1767 */ 1768 if (!evsel->core.attr.precise_ip) { 1769 evsel->core.attr.precise_ip = evsel->precise_ip_original; 1770 return false; 1771 } 1772 1773 if (!evsel->precise_ip_original) 1774 evsel->precise_ip_original = evsel->core.attr.precise_ip; 1775 1776 evsel->core.attr.precise_ip--; 1777 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); 1778 display_attr(&evsel->core.attr); 1779 return true; 1780 } 1781 1782 static struct perf_cpu_map *empty_cpu_map; 1783 static struct perf_thread_map *empty_thread_map; 1784 1785 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1786 struct perf_thread_map *threads) 1787 { 1788 int nthreads; 1789 1790 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || 1791 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) 1792 return -EINVAL; 1793 1794 if (cpus == NULL) { 1795 if (empty_cpu_map == NULL) { 1796 empty_cpu_map = perf_cpu_map__dummy_new(); 1797 if (empty_cpu_map == NULL) 1798 return -ENOMEM; 1799 } 1800 1801 cpus = empty_cpu_map; 1802 } 1803 1804 if (threads == NULL) { 1805 if (empty_thread_map == NULL) { 1806 empty_thread_map = thread_map__new_by_tid(-1); 1807 if (empty_thread_map == NULL) 1808 return -ENOMEM; 1809 } 1810 1811 threads = empty_thread_map; 1812 } 1813 1814 if (evsel->core.system_wide) 1815 nthreads = 1; 1816 else 1817 nthreads = threads->nr; 1818 1819 if (evsel->core.fd == NULL && 1820 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0) 1821 return -ENOMEM; 1822 1823 evsel->open_flags = PERF_FLAG_FD_CLOEXEC; 1824 if (evsel->cgrp) 1825 evsel->open_flags |= PERF_FLAG_PID_CGROUP; 1826 1827 return 0; 1828 } 1829 1830 static void evsel__disable_missing_features(struct evsel *evsel) 1831 { 1832 if (perf_missing_features.weight_struct) { 1833 evsel__set_sample_bit(evsel, WEIGHT); 1834 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); 1835 } 1836 if (perf_missing_features.clockid_wrong) 1837 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ 1838 if (perf_missing_features.clockid) { 1839 evsel->core.attr.use_clockid = 0; 1840 evsel->core.attr.clockid = 0; 1841 } 1842 if (perf_missing_features.cloexec) 1843 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; 1844 if (perf_missing_features.mmap2) 1845 evsel->core.attr.mmap2 = 0; 1846 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest) 1847 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; 1848 if (perf_missing_features.lbr_flags) 1849 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1850 PERF_SAMPLE_BRANCH_NO_CYCLES); 1851 if (perf_missing_features.group_read && evsel->core.attr.inherit) 1852 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); 1853 if (perf_missing_features.ksymbol) 1854 evsel->core.attr.ksymbol = 0; 1855 if (perf_missing_features.bpf) 1856 evsel->core.attr.bpf_event = 0; 1857 if (perf_missing_features.branch_hw_idx) 1858 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; 1859 if (perf_missing_features.sample_id_all) 1860 evsel->core.attr.sample_id_all = 0; 1861 } 1862 1863 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1864 struct perf_thread_map *threads) 1865 { 1866 int err; 1867 1868 err = __evsel__prepare_open(evsel, cpus, threads); 1869 if (err) 1870 return err; 1871 1872 evsel__disable_missing_features(evsel); 1873 1874 return err; 1875 } 1876 1877 bool evsel__detect_missing_features(struct evsel *evsel) 1878 { 1879 /* 1880 * Must probe features in the order they were added to the 1881 * perf_event_attr interface. 1882 */ 1883 if (!perf_missing_features.weight_struct && 1884 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { 1885 perf_missing_features.weight_struct = true; 1886 pr_debug2("switching off weight struct support\n"); 1887 return true; 1888 } else if (!perf_missing_features.code_page_size && 1889 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { 1890 perf_missing_features.code_page_size = true; 1891 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n"); 1892 return false; 1893 } else if (!perf_missing_features.data_page_size && 1894 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { 1895 perf_missing_features.data_page_size = true; 1896 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n"); 1897 return false; 1898 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { 1899 perf_missing_features.cgroup = true; 1900 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n"); 1901 return false; 1902 } else if (!perf_missing_features.branch_hw_idx && 1903 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { 1904 perf_missing_features.branch_hw_idx = true; 1905 pr_debug2("switching off branch HW index support\n"); 1906 return true; 1907 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { 1908 perf_missing_features.aux_output = true; 1909 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n"); 1910 return false; 1911 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { 1912 perf_missing_features.bpf = true; 1913 pr_debug2_peo("switching off bpf_event\n"); 1914 return true; 1915 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { 1916 perf_missing_features.ksymbol = true; 1917 pr_debug2_peo("switching off ksymbol\n"); 1918 return true; 1919 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { 1920 perf_missing_features.write_backward = true; 1921 pr_debug2_peo("switching off write_backward\n"); 1922 return false; 1923 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { 1924 perf_missing_features.clockid_wrong = true; 1925 pr_debug2_peo("switching off clockid\n"); 1926 return true; 1927 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { 1928 perf_missing_features.clockid = true; 1929 pr_debug2_peo("switching off use_clockid\n"); 1930 return true; 1931 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { 1932 perf_missing_features.cloexec = true; 1933 pr_debug2_peo("switching off cloexec flag\n"); 1934 return true; 1935 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { 1936 perf_missing_features.mmap2 = true; 1937 pr_debug2_peo("switching off mmap2\n"); 1938 return true; 1939 } else if ((evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) && 1940 (evsel->pmu == NULL || evsel->pmu->missing_features.exclude_guest)) { 1941 if (evsel->pmu == NULL) { 1942 evsel->pmu = evsel__find_pmu(evsel); 1943 if (evsel->pmu) 1944 evsel->pmu->missing_features.exclude_guest = true; 1945 else { 1946 /* we cannot find PMU, disable attrs now */ 1947 evsel->core.attr.exclude_host = false; 1948 evsel->core.attr.exclude_guest = false; 1949 } 1950 } 1951 1952 if (evsel->exclude_GH) { 1953 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n"); 1954 return false; 1955 } 1956 if (!perf_missing_features.exclude_guest) { 1957 perf_missing_features.exclude_guest = true; 1958 pr_debug2_peo("switching off exclude_guest, exclude_host\n"); 1959 } 1960 return true; 1961 } else if (!perf_missing_features.sample_id_all) { 1962 perf_missing_features.sample_id_all = true; 1963 pr_debug2_peo("switching off sample_id_all\n"); 1964 return true; 1965 } else if (!perf_missing_features.lbr_flags && 1966 (evsel->core.attr.branch_sample_type & 1967 (PERF_SAMPLE_BRANCH_NO_CYCLES | 1968 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1969 perf_missing_features.lbr_flags = true; 1970 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n"); 1971 return true; 1972 } else if (!perf_missing_features.group_read && 1973 evsel->core.attr.inherit && 1974 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && 1975 evsel__is_group_leader(evsel)) { 1976 perf_missing_features.group_read = true; 1977 pr_debug2_peo("switching off group read\n"); 1978 return true; 1979 } else { 1980 return false; 1981 } 1982 } 1983 1984 bool evsel__increase_rlimit(enum rlimit_action *set_rlimit) 1985 { 1986 int old_errno; 1987 struct rlimit l; 1988 1989 if (*set_rlimit < INCREASED_MAX) { 1990 old_errno = errno; 1991 1992 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 1993 if (*set_rlimit == NO_CHANGE) { 1994 l.rlim_cur = l.rlim_max; 1995 } else { 1996 l.rlim_cur = l.rlim_max + 1000; 1997 l.rlim_max = l.rlim_cur; 1998 } 1999 if (setrlimit(RLIMIT_NOFILE, &l) == 0) { 2000 (*set_rlimit) += 1; 2001 errno = old_errno; 2002 return true; 2003 } 2004 } 2005 errno = old_errno; 2006 } 2007 2008 return false; 2009 } 2010 2011 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, 2012 struct perf_thread_map *threads, 2013 int start_cpu_map_idx, int end_cpu_map_idx) 2014 { 2015 int idx, thread, nthreads; 2016 int pid = -1, err, old_errno; 2017 enum rlimit_action set_rlimit = NO_CHANGE; 2018 2019 err = __evsel__prepare_open(evsel, cpus, threads); 2020 if (err) 2021 return err; 2022 2023 if (cpus == NULL) 2024 cpus = empty_cpu_map; 2025 2026 if (threads == NULL) 2027 threads = empty_thread_map; 2028 2029 if (evsel->core.system_wide) 2030 nthreads = 1; 2031 else 2032 nthreads = threads->nr; 2033 2034 if (evsel->cgrp) 2035 pid = evsel->cgrp->fd; 2036 2037 fallback_missing_features: 2038 evsel__disable_missing_features(evsel); 2039 2040 display_attr(&evsel->core.attr); 2041 2042 for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) { 2043 2044 for (thread = 0; thread < nthreads; thread++) { 2045 int fd, group_fd; 2046 retry_open: 2047 if (thread >= nthreads) 2048 break; 2049 2050 if (!evsel->cgrp && !evsel->core.system_wide) 2051 pid = perf_thread_map__pid(threads, thread); 2052 2053 group_fd = get_group_fd(evsel, idx, thread); 2054 2055 test_attr__ready(); 2056 2057 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", 2058 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags); 2059 2060 fd = sys_perf_event_open(&evsel->core.attr, pid, 2061 perf_cpu_map__cpu(cpus, idx).cpu, 2062 group_fd, evsel->open_flags); 2063 2064 FD(evsel, idx, thread) = fd; 2065 2066 if (fd < 0) { 2067 err = -errno; 2068 2069 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", 2070 err); 2071 goto try_fallback; 2072 } 2073 2074 bpf_counter__install_pe(evsel, idx, fd); 2075 2076 if (unlikely(test_attr__enabled)) { 2077 test_attr__open(&evsel->core.attr, pid, 2078 perf_cpu_map__cpu(cpus, idx), 2079 fd, group_fd, evsel->open_flags); 2080 } 2081 2082 pr_debug2_peo(" = %d\n", fd); 2083 2084 if (evsel->bpf_fd >= 0) { 2085 int evt_fd = fd; 2086 int bpf_fd = evsel->bpf_fd; 2087 2088 err = ioctl(evt_fd, 2089 PERF_EVENT_IOC_SET_BPF, 2090 bpf_fd); 2091 if (err && errno != EEXIST) { 2092 pr_err("failed to attach bpf fd %d: %s\n", 2093 bpf_fd, strerror(errno)); 2094 err = -EINVAL; 2095 goto out_close; 2096 } 2097 } 2098 2099 set_rlimit = NO_CHANGE; 2100 2101 /* 2102 * If we succeeded but had to kill clockid, fail and 2103 * have evsel__open_strerror() print us a nice error. 2104 */ 2105 if (perf_missing_features.clockid || 2106 perf_missing_features.clockid_wrong) { 2107 err = -EINVAL; 2108 goto out_close; 2109 } 2110 } 2111 } 2112 2113 return 0; 2114 2115 try_fallback: 2116 if (evsel__precise_ip_fallback(evsel)) 2117 goto retry_open; 2118 2119 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus), 2120 idx, threads, thread, err)) { 2121 /* We just removed 1 thread, so lower the upper nthreads limit. */ 2122 nthreads--; 2123 2124 /* ... and pretend like nothing have happened. */ 2125 err = 0; 2126 goto retry_open; 2127 } 2128 /* 2129 * perf stat needs between 5 and 22 fds per CPU. When we run out 2130 * of them try to increase the limits. 2131 */ 2132 if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit)) 2133 goto retry_open; 2134 2135 if (err != -EINVAL || idx > 0 || thread > 0) 2136 goto out_close; 2137 2138 if (evsel__detect_missing_features(evsel)) 2139 goto fallback_missing_features; 2140 out_close: 2141 if (err) 2142 threads->err_thread = thread; 2143 2144 old_errno = errno; 2145 do { 2146 while (--thread >= 0) { 2147 if (FD(evsel, idx, thread) >= 0) 2148 close(FD(evsel, idx, thread)); 2149 FD(evsel, idx, thread) = -1; 2150 } 2151 thread = nthreads; 2152 } while (--idx >= 0); 2153 errno = old_errno; 2154 return err; 2155 } 2156 2157 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, 2158 struct perf_thread_map *threads) 2159 { 2160 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus)); 2161 } 2162 2163 void evsel__close(struct evsel *evsel) 2164 { 2165 perf_evsel__close(&evsel->core); 2166 perf_evsel__free_id(&evsel->core); 2167 } 2168 2169 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx) 2170 { 2171 if (cpu_map_idx == -1) 2172 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus)); 2173 2174 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1); 2175 } 2176 2177 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) 2178 { 2179 return evsel__open(evsel, NULL, threads); 2180 } 2181 2182 static int perf_evsel__parse_id_sample(const struct evsel *evsel, 2183 const union perf_event *event, 2184 struct perf_sample *sample) 2185 { 2186 u64 type = evsel->core.attr.sample_type; 2187 const __u64 *array = event->sample.array; 2188 bool swapped = evsel->needs_swap; 2189 union u64_swap u; 2190 2191 array += ((event->header.size - 2192 sizeof(event->header)) / sizeof(u64)) - 1; 2193 2194 if (type & PERF_SAMPLE_IDENTIFIER) { 2195 sample->id = *array; 2196 array--; 2197 } 2198 2199 if (type & PERF_SAMPLE_CPU) { 2200 u.val64 = *array; 2201 if (swapped) { 2202 /* undo swap of u64, then swap on individual u32s */ 2203 u.val64 = bswap_64(u.val64); 2204 u.val32[0] = bswap_32(u.val32[0]); 2205 } 2206 2207 sample->cpu = u.val32[0]; 2208 array--; 2209 } 2210 2211 if (type & PERF_SAMPLE_STREAM_ID) { 2212 sample->stream_id = *array; 2213 array--; 2214 } 2215 2216 if (type & PERF_SAMPLE_ID) { 2217 sample->id = *array; 2218 array--; 2219 } 2220 2221 if (type & PERF_SAMPLE_TIME) { 2222 sample->time = *array; 2223 array--; 2224 } 2225 2226 if (type & PERF_SAMPLE_TID) { 2227 u.val64 = *array; 2228 if (swapped) { 2229 /* undo swap of u64, then swap on individual u32s */ 2230 u.val64 = bswap_64(u.val64); 2231 u.val32[0] = bswap_32(u.val32[0]); 2232 u.val32[1] = bswap_32(u.val32[1]); 2233 } 2234 2235 sample->pid = u.val32[0]; 2236 sample->tid = u.val32[1]; 2237 array--; 2238 } 2239 2240 return 0; 2241 } 2242 2243 static inline bool overflow(const void *endp, u16 max_size, const void *offset, 2244 u64 size) 2245 { 2246 return size > max_size || offset + size > endp; 2247 } 2248 2249 #define OVERFLOW_CHECK(offset, size, max_size) \ 2250 do { \ 2251 if (overflow(endp, (max_size), (offset), (size))) \ 2252 return -EFAULT; \ 2253 } while (0) 2254 2255 #define OVERFLOW_CHECK_u64(offset) \ 2256 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) 2257 2258 static int 2259 perf_event__check_size(union perf_event *event, unsigned int sample_size) 2260 { 2261 /* 2262 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes 2263 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to 2264 * check the format does not go past the end of the event. 2265 */ 2266 if (sample_size + sizeof(event->header) > event->header.size) 2267 return -EFAULT; 2268 2269 return 0; 2270 } 2271 2272 void __weak arch_perf_parse_sample_weight(struct perf_sample *data, 2273 const __u64 *array, 2274 u64 type __maybe_unused) 2275 { 2276 data->weight = *array; 2277 } 2278 2279 u64 evsel__bitfield_swap_branch_flags(u64 value) 2280 { 2281 u64 new_val = 0; 2282 2283 /* 2284 * branch_flags 2285 * union { 2286 * u64 values; 2287 * struct { 2288 * mispred:1 //target mispredicted 2289 * predicted:1 //target predicted 2290 * in_tx:1 //in transaction 2291 * abort:1 //transaction abort 2292 * cycles:16 //cycle count to last branch 2293 * type:4 //branch type 2294 * reserved:40 2295 * } 2296 * } 2297 * 2298 * Avoid bswap64() the entire branch_flag.value, 2299 * as it has variable bit-field sizes. Instead the 2300 * macro takes the bit-field position/size, 2301 * swaps it based on the host endianness. 2302 * 2303 * tep_is_bigendian() is used here instead of 2304 * bigendian() to avoid python test fails. 2305 */ 2306 if (tep_is_bigendian()) { 2307 new_val = bitfield_swap(value, 0, 1); 2308 new_val |= bitfield_swap(value, 1, 1); 2309 new_val |= bitfield_swap(value, 2, 1); 2310 new_val |= bitfield_swap(value, 3, 1); 2311 new_val |= bitfield_swap(value, 4, 16); 2312 new_val |= bitfield_swap(value, 20, 4); 2313 new_val |= bitfield_swap(value, 24, 40); 2314 } else { 2315 new_val = bitfield_swap(value, 63, 1); 2316 new_val |= bitfield_swap(value, 62, 1); 2317 new_val |= bitfield_swap(value, 61, 1); 2318 new_val |= bitfield_swap(value, 60, 1); 2319 new_val |= bitfield_swap(value, 44, 16); 2320 new_val |= bitfield_swap(value, 40, 4); 2321 new_val |= bitfield_swap(value, 0, 40); 2322 } 2323 2324 return new_val; 2325 } 2326 2327 int evsel__parse_sample(struct evsel *evsel, union perf_event *event, 2328 struct perf_sample *data) 2329 { 2330 u64 type = evsel->core.attr.sample_type; 2331 bool swapped = evsel->needs_swap; 2332 const __u64 *array; 2333 u16 max_size = event->header.size; 2334 const void *endp = (void *)event + max_size; 2335 u64 sz; 2336 2337 /* 2338 * used for cross-endian analysis. See git commit 65014ab3 2339 * for why this goofiness is needed. 2340 */ 2341 union u64_swap u; 2342 2343 memset(data, 0, sizeof(*data)); 2344 data->cpu = data->pid = data->tid = -1; 2345 data->stream_id = data->id = data->time = -1ULL; 2346 data->period = evsel->core.attr.sample_period; 2347 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 2348 data->misc = event->header.misc; 2349 data->id = -1ULL; 2350 data->data_src = PERF_MEM_DATA_SRC_NONE; 2351 2352 if (event->header.type != PERF_RECORD_SAMPLE) { 2353 if (!evsel->core.attr.sample_id_all) 2354 return 0; 2355 return perf_evsel__parse_id_sample(evsel, event, data); 2356 } 2357 2358 array = event->sample.array; 2359 2360 if (perf_event__check_size(event, evsel->sample_size)) 2361 return -EFAULT; 2362 2363 if (type & PERF_SAMPLE_IDENTIFIER) { 2364 data->id = *array; 2365 array++; 2366 } 2367 2368 if (type & PERF_SAMPLE_IP) { 2369 data->ip = *array; 2370 array++; 2371 } 2372 2373 if (type & PERF_SAMPLE_TID) { 2374 u.val64 = *array; 2375 if (swapped) { 2376 /* undo swap of u64, then swap on individual u32s */ 2377 u.val64 = bswap_64(u.val64); 2378 u.val32[0] = bswap_32(u.val32[0]); 2379 u.val32[1] = bswap_32(u.val32[1]); 2380 } 2381 2382 data->pid = u.val32[0]; 2383 data->tid = u.val32[1]; 2384 array++; 2385 } 2386 2387 if (type & PERF_SAMPLE_TIME) { 2388 data->time = *array; 2389 array++; 2390 } 2391 2392 if (type & PERF_SAMPLE_ADDR) { 2393 data->addr = *array; 2394 array++; 2395 } 2396 2397 if (type & PERF_SAMPLE_ID) { 2398 data->id = *array; 2399 array++; 2400 } 2401 2402 if (type & PERF_SAMPLE_STREAM_ID) { 2403 data->stream_id = *array; 2404 array++; 2405 } 2406 2407 if (type & PERF_SAMPLE_CPU) { 2408 2409 u.val64 = *array; 2410 if (swapped) { 2411 /* undo swap of u64, then swap on individual u32s */ 2412 u.val64 = bswap_64(u.val64); 2413 u.val32[0] = bswap_32(u.val32[0]); 2414 } 2415 2416 data->cpu = u.val32[0]; 2417 array++; 2418 } 2419 2420 if (type & PERF_SAMPLE_PERIOD) { 2421 data->period = *array; 2422 array++; 2423 } 2424 2425 if (type & PERF_SAMPLE_READ) { 2426 u64 read_format = evsel->core.attr.read_format; 2427 2428 OVERFLOW_CHECK_u64(array); 2429 if (read_format & PERF_FORMAT_GROUP) 2430 data->read.group.nr = *array; 2431 else 2432 data->read.one.value = *array; 2433 2434 array++; 2435 2436 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2437 OVERFLOW_CHECK_u64(array); 2438 data->read.time_enabled = *array; 2439 array++; 2440 } 2441 2442 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2443 OVERFLOW_CHECK_u64(array); 2444 data->read.time_running = *array; 2445 array++; 2446 } 2447 2448 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2449 if (read_format & PERF_FORMAT_GROUP) { 2450 const u64 max_group_nr = UINT64_MAX / 2451 sizeof(struct sample_read_value); 2452 2453 if (data->read.group.nr > max_group_nr) 2454 return -EFAULT; 2455 sz = data->read.group.nr * 2456 sizeof(struct sample_read_value); 2457 OVERFLOW_CHECK(array, sz, max_size); 2458 data->read.group.values = 2459 (struct sample_read_value *)array; 2460 array = (void *)array + sz; 2461 } else { 2462 OVERFLOW_CHECK_u64(array); 2463 data->read.one.id = *array; 2464 array++; 2465 } 2466 } 2467 2468 if (type & PERF_SAMPLE_CALLCHAIN) { 2469 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); 2470 2471 OVERFLOW_CHECK_u64(array); 2472 data->callchain = (struct ip_callchain *)array++; 2473 if (data->callchain->nr > max_callchain_nr) 2474 return -EFAULT; 2475 sz = data->callchain->nr * sizeof(u64); 2476 OVERFLOW_CHECK(array, sz, max_size); 2477 array = (void *)array + sz; 2478 } 2479 2480 if (type & PERF_SAMPLE_RAW) { 2481 OVERFLOW_CHECK_u64(array); 2482 u.val64 = *array; 2483 2484 /* 2485 * Undo swap of u64, then swap on individual u32s, 2486 * get the size of the raw area and undo all of the 2487 * swap. The pevent interface handles endianness by 2488 * itself. 2489 */ 2490 if (swapped) { 2491 u.val64 = bswap_64(u.val64); 2492 u.val32[0] = bswap_32(u.val32[0]); 2493 u.val32[1] = bswap_32(u.val32[1]); 2494 } 2495 data->raw_size = u.val32[0]; 2496 2497 /* 2498 * The raw data is aligned on 64bits including the 2499 * u32 size, so it's safe to use mem_bswap_64. 2500 */ 2501 if (swapped) 2502 mem_bswap_64((void *) array, data->raw_size); 2503 2504 array = (void *)array + sizeof(u32); 2505 2506 OVERFLOW_CHECK(array, data->raw_size, max_size); 2507 data->raw_data = (void *)array; 2508 array = (void *)array + data->raw_size; 2509 } 2510 2511 if (type & PERF_SAMPLE_BRANCH_STACK) { 2512 const u64 max_branch_nr = UINT64_MAX / 2513 sizeof(struct branch_entry); 2514 struct branch_entry *e; 2515 unsigned int i; 2516 2517 OVERFLOW_CHECK_u64(array); 2518 data->branch_stack = (struct branch_stack *)array++; 2519 2520 if (data->branch_stack->nr > max_branch_nr) 2521 return -EFAULT; 2522 2523 sz = data->branch_stack->nr * sizeof(struct branch_entry); 2524 if (evsel__has_branch_hw_idx(evsel)) { 2525 sz += sizeof(u64); 2526 e = &data->branch_stack->entries[0]; 2527 } else { 2528 data->no_hw_idx = true; 2529 /* 2530 * if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied, 2531 * only nr and entries[] will be output by kernel. 2532 */ 2533 e = (struct branch_entry *)&data->branch_stack->hw_idx; 2534 } 2535 2536 if (swapped) { 2537 /* 2538 * struct branch_flag does not have endian 2539 * specific bit field definition. And bswap 2540 * will not resolve the issue, since these 2541 * are bit fields. 2542 * 2543 * evsel__bitfield_swap_branch_flags() uses a 2544 * bitfield_swap macro to swap the bit position 2545 * based on the host endians. 2546 */ 2547 for (i = 0; i < data->branch_stack->nr; i++, e++) 2548 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value); 2549 } 2550 2551 OVERFLOW_CHECK(array, sz, max_size); 2552 array = (void *)array + sz; 2553 } 2554 2555 if (type & PERF_SAMPLE_REGS_USER) { 2556 OVERFLOW_CHECK_u64(array); 2557 data->user_regs.abi = *array; 2558 array++; 2559 2560 if (data->user_regs.abi) { 2561 u64 mask = evsel->core.attr.sample_regs_user; 2562 2563 sz = hweight64(mask) * sizeof(u64); 2564 OVERFLOW_CHECK(array, sz, max_size); 2565 data->user_regs.mask = mask; 2566 data->user_regs.regs = (u64 *)array; 2567 array = (void *)array + sz; 2568 } 2569 } 2570 2571 if (type & PERF_SAMPLE_STACK_USER) { 2572 OVERFLOW_CHECK_u64(array); 2573 sz = *array++; 2574 2575 data->user_stack.offset = ((char *)(array - 1) 2576 - (char *) event); 2577 2578 if (!sz) { 2579 data->user_stack.size = 0; 2580 } else { 2581 OVERFLOW_CHECK(array, sz, max_size); 2582 data->user_stack.data = (char *)array; 2583 array = (void *)array + sz; 2584 OVERFLOW_CHECK_u64(array); 2585 data->user_stack.size = *array++; 2586 if (WARN_ONCE(data->user_stack.size > sz, 2587 "user stack dump failure\n")) 2588 return -EFAULT; 2589 } 2590 } 2591 2592 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 2593 OVERFLOW_CHECK_u64(array); 2594 arch_perf_parse_sample_weight(data, array, type); 2595 array++; 2596 } 2597 2598 if (type & PERF_SAMPLE_DATA_SRC) { 2599 OVERFLOW_CHECK_u64(array); 2600 data->data_src = *array; 2601 array++; 2602 } 2603 2604 if (type & PERF_SAMPLE_TRANSACTION) { 2605 OVERFLOW_CHECK_u64(array); 2606 data->transaction = *array; 2607 array++; 2608 } 2609 2610 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; 2611 if (type & PERF_SAMPLE_REGS_INTR) { 2612 OVERFLOW_CHECK_u64(array); 2613 data->intr_regs.abi = *array; 2614 array++; 2615 2616 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 2617 u64 mask = evsel->core.attr.sample_regs_intr; 2618 2619 sz = hweight64(mask) * sizeof(u64); 2620 OVERFLOW_CHECK(array, sz, max_size); 2621 data->intr_regs.mask = mask; 2622 data->intr_regs.regs = (u64 *)array; 2623 array = (void *)array + sz; 2624 } 2625 } 2626 2627 data->phys_addr = 0; 2628 if (type & PERF_SAMPLE_PHYS_ADDR) { 2629 data->phys_addr = *array; 2630 array++; 2631 } 2632 2633 data->cgroup = 0; 2634 if (type & PERF_SAMPLE_CGROUP) { 2635 data->cgroup = *array; 2636 array++; 2637 } 2638 2639 data->data_page_size = 0; 2640 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 2641 data->data_page_size = *array; 2642 array++; 2643 } 2644 2645 data->code_page_size = 0; 2646 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 2647 data->code_page_size = *array; 2648 array++; 2649 } 2650 2651 if (type & PERF_SAMPLE_AUX) { 2652 OVERFLOW_CHECK_u64(array); 2653 sz = *array++; 2654 2655 OVERFLOW_CHECK(array, sz, max_size); 2656 /* Undo swap of data */ 2657 if (swapped) 2658 mem_bswap_64((char *)array, sz); 2659 data->aux_sample.size = sz; 2660 data->aux_sample.data = (char *)array; 2661 array = (void *)array + sz; 2662 } 2663 2664 return 0; 2665 } 2666 2667 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event, 2668 u64 *timestamp) 2669 { 2670 u64 type = evsel->core.attr.sample_type; 2671 const __u64 *array; 2672 2673 if (!(type & PERF_SAMPLE_TIME)) 2674 return -1; 2675 2676 if (event->header.type != PERF_RECORD_SAMPLE) { 2677 struct perf_sample data = { 2678 .time = -1ULL, 2679 }; 2680 2681 if (!evsel->core.attr.sample_id_all) 2682 return -1; 2683 if (perf_evsel__parse_id_sample(evsel, event, &data)) 2684 return -1; 2685 2686 *timestamp = data.time; 2687 return 0; 2688 } 2689 2690 array = event->sample.array; 2691 2692 if (perf_event__check_size(event, evsel->sample_size)) 2693 return -EFAULT; 2694 2695 if (type & PERF_SAMPLE_IDENTIFIER) 2696 array++; 2697 2698 if (type & PERF_SAMPLE_IP) 2699 array++; 2700 2701 if (type & PERF_SAMPLE_TID) 2702 array++; 2703 2704 if (type & PERF_SAMPLE_TIME) 2705 *timestamp = *array; 2706 2707 return 0; 2708 } 2709 2710 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name) 2711 { 2712 return tep_find_field(evsel->tp_format, name); 2713 } 2714 2715 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name) 2716 { 2717 struct tep_format_field *field = evsel__field(evsel, name); 2718 int offset; 2719 2720 if (!field) 2721 return NULL; 2722 2723 offset = field->offset; 2724 2725 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2726 offset = *(int *)(sample->raw_data + field->offset); 2727 offset &= 0xffff; 2728 if (field->flags & TEP_FIELD_IS_RELATIVE) 2729 offset += field->offset + field->size; 2730 } 2731 2732 return sample->raw_data + offset; 2733 } 2734 2735 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, 2736 bool needs_swap) 2737 { 2738 u64 value; 2739 void *ptr = sample->raw_data + field->offset; 2740 2741 switch (field->size) { 2742 case 1: 2743 return *(u8 *)ptr; 2744 case 2: 2745 value = *(u16 *)ptr; 2746 break; 2747 case 4: 2748 value = *(u32 *)ptr; 2749 break; 2750 case 8: 2751 memcpy(&value, ptr, sizeof(u64)); 2752 break; 2753 default: 2754 return 0; 2755 } 2756 2757 if (!needs_swap) 2758 return value; 2759 2760 switch (field->size) { 2761 case 2: 2762 return bswap_16(value); 2763 case 4: 2764 return bswap_32(value); 2765 case 8: 2766 return bswap_64(value); 2767 default: 2768 return 0; 2769 } 2770 2771 return 0; 2772 } 2773 2774 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name) 2775 { 2776 struct tep_format_field *field = evsel__field(evsel, name); 2777 2778 if (!field) 2779 return 0; 2780 2781 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; 2782 } 2783 2784 bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) 2785 { 2786 int paranoid; 2787 2788 if ((err == ENOENT || err == ENXIO || err == ENODEV) && 2789 evsel->core.attr.type == PERF_TYPE_HARDWARE && 2790 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { 2791 /* 2792 * If it's cycles then fall back to hrtimer based 2793 * cpu-clock-tick sw counter, which is always available even if 2794 * no PMU support. 2795 * 2796 * PPC returns ENXIO until 2.6.37 (behavior changed with commit 2797 * b0a873e). 2798 */ 2799 scnprintf(msg, msgsize, "%s", 2800 "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); 2801 2802 evsel->core.attr.type = PERF_TYPE_SOFTWARE; 2803 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; 2804 2805 zfree(&evsel->name); 2806 return true; 2807 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && 2808 (paranoid = perf_event_paranoid()) > 1) { 2809 const char *name = evsel__name(evsel); 2810 char *new_name; 2811 const char *sep = ":"; 2812 2813 /* If event has exclude user then don't exclude kernel. */ 2814 if (evsel->core.attr.exclude_user) 2815 return false; 2816 2817 /* Is there already the separator in the name. */ 2818 if (strchr(name, '/') || 2819 (strchr(name, ':') && !evsel->is_libpfm_event)) 2820 sep = ""; 2821 2822 if (asprintf(&new_name, "%s%su", name, sep) < 0) 2823 return false; 2824 2825 if (evsel->name) 2826 free(evsel->name); 2827 evsel->name = new_name; 2828 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying " 2829 "to fall back to excluding kernel and hypervisor " 2830 " samples", paranoid); 2831 evsel->core.attr.exclude_kernel = 1; 2832 evsel->core.attr.exclude_hv = 1; 2833 2834 return true; 2835 } 2836 2837 return false; 2838 } 2839 2840 static bool find_process(const char *name) 2841 { 2842 size_t len = strlen(name); 2843 DIR *dir; 2844 struct dirent *d; 2845 int ret = -1; 2846 2847 dir = opendir(procfs__mountpoint()); 2848 if (!dir) 2849 return false; 2850 2851 /* Walk through the directory. */ 2852 while (ret && (d = readdir(dir)) != NULL) { 2853 char path[PATH_MAX]; 2854 char *data; 2855 size_t size; 2856 2857 if ((d->d_type != DT_DIR) || 2858 !strcmp(".", d->d_name) || 2859 !strcmp("..", d->d_name)) 2860 continue; 2861 2862 scnprintf(path, sizeof(path), "%s/%s/comm", 2863 procfs__mountpoint(), d->d_name); 2864 2865 if (filename__read_str(path, &data, &size)) 2866 continue; 2867 2868 ret = strncmp(name, data, len); 2869 free(data); 2870 } 2871 2872 closedir(dir); 2873 return ret ? false : true; 2874 } 2875 2876 static bool is_amd(const char *arch, const char *cpuid) 2877 { 2878 return arch && !strcmp("x86", arch) && cpuid && strstarts(cpuid, "AuthenticAMD"); 2879 } 2880 2881 static bool is_amd_ibs(struct evsel *evsel) 2882 { 2883 return evsel->core.attr.precise_ip 2884 || (evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3)); 2885 } 2886 2887 int evsel__open_strerror(struct evsel *evsel, struct target *target, 2888 int err, char *msg, size_t size) 2889 { 2890 struct perf_env *env = evsel__env(evsel); 2891 const char *arch = perf_env__arch(env); 2892 const char *cpuid = perf_env__cpuid(env); 2893 char sbuf[STRERR_BUFSIZE]; 2894 int printed = 0, enforced = 0; 2895 2896 switch (err) { 2897 case EPERM: 2898 case EACCES: 2899 printed += scnprintf(msg + printed, size - printed, 2900 "Access to performance monitoring and observability operations is limited.\n"); 2901 2902 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) { 2903 if (enforced) { 2904 printed += scnprintf(msg + printed, size - printed, 2905 "Enforced MAC policy settings (SELinux) can limit access to performance\n" 2906 "monitoring and observability operations. Inspect system audit records for\n" 2907 "more perf_event access control information and adjusting the policy.\n"); 2908 } 2909 } 2910 2911 if (err == EPERM) 2912 printed += scnprintf(msg, size, 2913 "No permission to enable %s event.\n\n", evsel__name(evsel)); 2914 2915 return scnprintf(msg + printed, size - printed, 2916 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n" 2917 "access to performance monitoring and observability operations for processes\n" 2918 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n" 2919 "More information can be found at 'Perf events and tool security' document:\n" 2920 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n" 2921 "perf_event_paranoid setting is %d:\n" 2922 " -1: Allow use of (almost) all events by all users\n" 2923 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n" 2924 ">= 0: Disallow raw and ftrace function tracepoint access\n" 2925 ">= 1: Disallow CPU event access\n" 2926 ">= 2: Disallow kernel profiling\n" 2927 "To make the adjusted perf_event_paranoid setting permanent preserve it\n" 2928 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)", 2929 perf_event_paranoid()); 2930 case ENOENT: 2931 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel)); 2932 case EMFILE: 2933 return scnprintf(msg, size, "%s", 2934 "Too many events are opened.\n" 2935 "Probably the maximum number of open file descriptors has been reached.\n" 2936 "Hint: Try again after reducing the number of events.\n" 2937 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 2938 case ENOMEM: 2939 if (evsel__has_callchain(evsel) && 2940 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0) 2941 return scnprintf(msg, size, 2942 "Not enough memory to setup event with callchain.\n" 2943 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" 2944 "Hint: Current value: %d", sysctl__max_stack()); 2945 break; 2946 case ENODEV: 2947 if (target->cpu_list) 2948 return scnprintf(msg, size, "%s", 2949 "No such device - did you specify an out-of-range profile CPU?"); 2950 break; 2951 case EOPNOTSUPP: 2952 if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK) 2953 return scnprintf(msg, size, 2954 "%s: PMU Hardware or event type doesn't support branch stack sampling.", 2955 evsel__name(evsel)); 2956 if (evsel->core.attr.aux_output) 2957 return scnprintf(msg, size, 2958 "%s: PMU Hardware doesn't support 'aux_output' feature", 2959 evsel__name(evsel)); 2960 if (evsel->core.attr.sample_period != 0) 2961 return scnprintf(msg, size, 2962 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'", 2963 evsel__name(evsel)); 2964 if (evsel->core.attr.precise_ip) 2965 return scnprintf(msg, size, "%s", 2966 "\'precise\' request may not be supported. Try removing 'p' modifier."); 2967 #if defined(__i386__) || defined(__x86_64__) 2968 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) 2969 return scnprintf(msg, size, "%s", 2970 "No hardware sampling interrupt available.\n"); 2971 #endif 2972 break; 2973 case EBUSY: 2974 if (find_process("oprofiled")) 2975 return scnprintf(msg, size, 2976 "The PMU counters are busy/taken by another profiler.\n" 2977 "We found oprofile daemon running, please stop it and try again."); 2978 break; 2979 case EINVAL: 2980 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size) 2981 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel."); 2982 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size) 2983 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel."); 2984 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) 2985 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel."); 2986 if (perf_missing_features.clockid) 2987 return scnprintf(msg, size, "clockid feature not supported."); 2988 if (perf_missing_features.clockid_wrong) 2989 return scnprintf(msg, size, "wrong clockid (%d).", clockid); 2990 if (perf_missing_features.aux_output) 2991 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel."); 2992 if (!target__has_cpu(target)) 2993 return scnprintf(msg, size, 2994 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.", 2995 evsel__name(evsel)); 2996 if (is_amd(arch, cpuid)) { 2997 if (is_amd_ibs(evsel)) { 2998 if (evsel->core.attr.exclude_kernel) 2999 return scnprintf(msg, size, 3000 "AMD IBS can't exclude kernel events. Try running at a higher privilege level."); 3001 if (!evsel->core.system_wide) 3002 return scnprintf(msg, size, 3003 "AMD IBS may only be available in system-wide/per-cpu mode. Try using -a, or -C and workload affinity"); 3004 } 3005 } 3006 3007 break; 3008 case ENODATA: 3009 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. " 3010 "Please add an auxiliary event in front of the load latency event."); 3011 default: 3012 break; 3013 } 3014 3015 return scnprintf(msg, size, 3016 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 3017 "/bin/dmesg | grep -i perf may provide additional information.\n", 3018 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel)); 3019 } 3020 3021 struct perf_env *evsel__env(struct evsel *evsel) 3022 { 3023 if (evsel && evsel->evlist && evsel->evlist->env) 3024 return evsel->evlist->env; 3025 return &perf_env; 3026 } 3027 3028 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) 3029 { 3030 int cpu_map_idx, thread; 3031 3032 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) { 3033 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); 3034 thread++) { 3035 int fd = FD(evsel, cpu_map_idx, thread); 3036 3037 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, 3038 cpu_map_idx, thread, fd) < 0) 3039 return -1; 3040 } 3041 } 3042 3043 return 0; 3044 } 3045 3046 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist) 3047 { 3048 struct perf_cpu_map *cpus = evsel->core.cpus; 3049 struct perf_thread_map *threads = evsel->core.threads; 3050 3051 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr)) 3052 return -ENOMEM; 3053 3054 return store_evsel_ids(evsel, evlist); 3055 } 3056 3057 void evsel__zero_per_pkg(struct evsel *evsel) 3058 { 3059 struct hashmap_entry *cur; 3060 size_t bkt; 3061 3062 if (evsel->per_pkg_mask) { 3063 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) 3064 free((char *)cur->key); 3065 3066 hashmap__clear(evsel->per_pkg_mask); 3067 } 3068 } 3069 3070 bool evsel__is_hybrid(struct evsel *evsel) 3071 { 3072 return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name); 3073 } 3074 3075 struct evsel *evsel__leader(struct evsel *evsel) 3076 { 3077 return container_of(evsel->core.leader, struct evsel, core); 3078 } 3079 3080 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) 3081 { 3082 return evsel->core.leader == &leader->core; 3083 } 3084 3085 bool evsel__is_leader(struct evsel *evsel) 3086 { 3087 return evsel__has_leader(evsel, evsel); 3088 } 3089 3090 void evsel__set_leader(struct evsel *evsel, struct evsel *leader) 3091 { 3092 evsel->core.leader = &leader->core; 3093 } 3094 3095 int evsel__source_count(const struct evsel *evsel) 3096 { 3097 struct evsel *pos; 3098 int count = 0; 3099 3100 evlist__for_each_entry(evsel->evlist, pos) { 3101 if (pos->metric_leader == evsel) 3102 count++; 3103 } 3104 return count; 3105 } 3106 3107 bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused) 3108 { 3109 return false; 3110 } 3111 3112 /* 3113 * Remove an event from a given group (leader). 3114 * Some events, e.g., perf metrics Topdown events, 3115 * must always be grouped. Ignore the events. 3116 */ 3117 void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader) 3118 { 3119 if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) { 3120 evsel__set_leader(evsel, evsel); 3121 evsel->core.nr_members = 0; 3122 leader->core.nr_members--; 3123 } 3124 } 3125