1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <byteswap.h> 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <linux/bitops.h> 13 #include <api/fs/fs.h> 14 #include <api/fs/tracing_path.h> 15 #include <traceevent/event-parse.h> 16 #include <linux/hw_breakpoint.h> 17 #include <linux/perf_event.h> 18 #include <linux/compiler.h> 19 #include <linux/err.h> 20 #include <linux/zalloc.h> 21 #include <sys/ioctl.h> 22 #include <sys/resource.h> 23 #include <sys/types.h> 24 #include <dirent.h> 25 #include <stdlib.h> 26 #include <perf/evsel.h> 27 #include "asm/bug.h" 28 #include "bpf_counter.h" 29 #include "callchain.h" 30 #include "cgroup.h" 31 #include "counts.h" 32 #include "event.h" 33 #include "evsel.h" 34 #include "util/env.h" 35 #include "util/evsel_config.h" 36 #include "util/evsel_fprintf.h" 37 #include "evlist.h" 38 #include <perf/cpumap.h> 39 #include "thread_map.h" 40 #include "target.h" 41 #include "perf_regs.h" 42 #include "record.h" 43 #include "debug.h" 44 #include "trace-event.h" 45 #include "stat.h" 46 #include "string2.h" 47 #include "memswap.h" 48 #include "util.h" 49 #include "hashmap.h" 50 #include "pmu-hybrid.h" 51 #include "../perf-sys.h" 52 #include "util/parse-branch-options.h" 53 #include <internal/xyarray.h> 54 #include <internal/lib.h> 55 56 #include <linux/ctype.h> 57 58 struct perf_missing_features perf_missing_features; 59 60 static clockid_t clockid; 61 62 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused) 63 { 64 return 0; 65 } 66 67 void __weak test_attr__ready(void) { } 68 69 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused) 70 { 71 } 72 73 static struct { 74 size_t size; 75 int (*init)(struct evsel *evsel); 76 void (*fini)(struct evsel *evsel); 77 } perf_evsel__object = { 78 .size = sizeof(struct evsel), 79 .init = evsel__no_extra_init, 80 .fini = evsel__no_extra_fini, 81 }; 82 83 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel), 84 void (*fini)(struct evsel *evsel)) 85 { 86 87 if (object_size == 0) 88 goto set_methods; 89 90 if (perf_evsel__object.size > object_size) 91 return -EINVAL; 92 93 perf_evsel__object.size = object_size; 94 95 set_methods: 96 if (init != NULL) 97 perf_evsel__object.init = init; 98 99 if (fini != NULL) 100 perf_evsel__object.fini = fini; 101 102 return 0; 103 } 104 105 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 106 107 int __evsel__sample_size(u64 sample_type) 108 { 109 u64 mask = sample_type & PERF_SAMPLE_MASK; 110 int size = 0; 111 int i; 112 113 for (i = 0; i < 64; i++) { 114 if (mask & (1ULL << i)) 115 size++; 116 } 117 118 size *= sizeof(u64); 119 120 return size; 121 } 122 123 /** 124 * __perf_evsel__calc_id_pos - calculate id_pos. 125 * @sample_type: sample type 126 * 127 * This function returns the position of the event id (PERF_SAMPLE_ID or 128 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct 129 * perf_record_sample. 130 */ 131 static int __perf_evsel__calc_id_pos(u64 sample_type) 132 { 133 int idx = 0; 134 135 if (sample_type & PERF_SAMPLE_IDENTIFIER) 136 return 0; 137 138 if (!(sample_type & PERF_SAMPLE_ID)) 139 return -1; 140 141 if (sample_type & PERF_SAMPLE_IP) 142 idx += 1; 143 144 if (sample_type & PERF_SAMPLE_TID) 145 idx += 1; 146 147 if (sample_type & PERF_SAMPLE_TIME) 148 idx += 1; 149 150 if (sample_type & PERF_SAMPLE_ADDR) 151 idx += 1; 152 153 return idx; 154 } 155 156 /** 157 * __perf_evsel__calc_is_pos - calculate is_pos. 158 * @sample_type: sample type 159 * 160 * This function returns the position (counting backwards) of the event id 161 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if 162 * sample_id_all is used there is an id sample appended to non-sample events. 163 */ 164 static int __perf_evsel__calc_is_pos(u64 sample_type) 165 { 166 int idx = 1; 167 168 if (sample_type & PERF_SAMPLE_IDENTIFIER) 169 return 1; 170 171 if (!(sample_type & PERF_SAMPLE_ID)) 172 return -1; 173 174 if (sample_type & PERF_SAMPLE_CPU) 175 idx += 1; 176 177 if (sample_type & PERF_SAMPLE_STREAM_ID) 178 idx += 1; 179 180 return idx; 181 } 182 183 void evsel__calc_id_pos(struct evsel *evsel) 184 { 185 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); 186 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); 187 } 188 189 void __evsel__set_sample_bit(struct evsel *evsel, 190 enum perf_event_sample_format bit) 191 { 192 if (!(evsel->core.attr.sample_type & bit)) { 193 evsel->core.attr.sample_type |= bit; 194 evsel->sample_size += sizeof(u64); 195 evsel__calc_id_pos(evsel); 196 } 197 } 198 199 void __evsel__reset_sample_bit(struct evsel *evsel, 200 enum perf_event_sample_format bit) 201 { 202 if (evsel->core.attr.sample_type & bit) { 203 evsel->core.attr.sample_type &= ~bit; 204 evsel->sample_size -= sizeof(u64); 205 evsel__calc_id_pos(evsel); 206 } 207 } 208 209 void evsel__set_sample_id(struct evsel *evsel, 210 bool can_sample_identifier) 211 { 212 if (can_sample_identifier) { 213 evsel__reset_sample_bit(evsel, ID); 214 evsel__set_sample_bit(evsel, IDENTIFIER); 215 } else { 216 evsel__set_sample_bit(evsel, ID); 217 } 218 evsel->core.attr.read_format |= PERF_FORMAT_ID; 219 } 220 221 /** 222 * evsel__is_function_event - Return whether given evsel is a function 223 * trace event 224 * 225 * @evsel - evsel selector to be tested 226 * 227 * Return %true if event is function trace event 228 */ 229 bool evsel__is_function_event(struct evsel *evsel) 230 { 231 #define FUNCTION_EVENT "ftrace:function" 232 233 return evsel->name && 234 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); 235 236 #undef FUNCTION_EVENT 237 } 238 239 void evsel__init(struct evsel *evsel, 240 struct perf_event_attr *attr, int idx) 241 { 242 perf_evsel__init(&evsel->core, attr, idx); 243 evsel->tracking = !idx; 244 evsel->unit = strdup(""); 245 evsel->scale = 1.0; 246 evsel->max_events = ULONG_MAX; 247 evsel->evlist = NULL; 248 evsel->bpf_obj = NULL; 249 evsel->bpf_fd = -1; 250 INIT_LIST_HEAD(&evsel->config_terms); 251 INIT_LIST_HEAD(&evsel->bpf_counter_list); 252 perf_evsel__object.init(evsel); 253 evsel->sample_size = __evsel__sample_size(attr->sample_type); 254 evsel__calc_id_pos(evsel); 255 evsel->cmdline_group_boundary = false; 256 evsel->metric_expr = NULL; 257 evsel->metric_name = NULL; 258 evsel->metric_events = NULL; 259 evsel->per_pkg_mask = NULL; 260 evsel->collect_stat = false; 261 evsel->pmu_name = NULL; 262 } 263 264 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) 265 { 266 struct evsel *evsel = zalloc(perf_evsel__object.size); 267 268 if (!evsel) 269 return NULL; 270 evsel__init(evsel, attr, idx); 271 272 if (evsel__is_bpf_output(evsel)) { 273 evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 274 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 275 evsel->core.attr.sample_period = 1; 276 } 277 278 if (evsel__is_clock(evsel)) { 279 free((char *)evsel->unit); 280 evsel->unit = strdup("msec"); 281 evsel->scale = 1e-6; 282 } 283 284 return evsel; 285 } 286 287 static bool perf_event_can_profile_kernel(void) 288 { 289 return perf_event_paranoid_check(1); 290 } 291 292 struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config) 293 { 294 struct perf_event_attr attr = { 295 .type = type, 296 .config = config, 297 .exclude_kernel = !perf_event_can_profile_kernel(), 298 }; 299 struct evsel *evsel; 300 301 event_attr_init(&attr); 302 303 /* 304 * Now let the usual logic to set up the perf_event_attr defaults 305 * to kick in when we return and before perf_evsel__open() is called. 306 */ 307 evsel = evsel__new(&attr); 308 if (evsel == NULL) 309 goto out; 310 311 arch_evsel__fixup_new_cycles(&evsel->core.attr); 312 313 evsel->precise_max = true; 314 315 /* use asprintf() because free(evsel) assumes name is allocated */ 316 if (asprintf(&evsel->name, "cycles%s%s%.*s", 317 (attr.precise_ip || attr.exclude_kernel) ? ":" : "", 318 attr.exclude_kernel ? "u" : "", 319 attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0) 320 goto error_free; 321 out: 322 return evsel; 323 error_free: 324 evsel__delete(evsel); 325 evsel = NULL; 326 goto out; 327 } 328 329 int copy_config_terms(struct list_head *dst, struct list_head *src) 330 { 331 struct evsel_config_term *pos, *tmp; 332 333 list_for_each_entry(pos, src, list) { 334 tmp = malloc(sizeof(*tmp)); 335 if (tmp == NULL) 336 return -ENOMEM; 337 338 *tmp = *pos; 339 if (tmp->free_str) { 340 tmp->val.str = strdup(pos->val.str); 341 if (tmp->val.str == NULL) { 342 free(tmp); 343 return -ENOMEM; 344 } 345 } 346 list_add_tail(&tmp->list, dst); 347 } 348 return 0; 349 } 350 351 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) 352 { 353 return copy_config_terms(&dst->config_terms, &src->config_terms); 354 } 355 356 /** 357 * evsel__clone - create a new evsel copied from @orig 358 * @orig: original evsel 359 * 360 * The assumption is that @orig is not configured nor opened yet. 361 * So we only care about the attributes that can be set while it's parsed. 362 */ 363 struct evsel *evsel__clone(struct evsel *orig) 364 { 365 struct evsel *evsel; 366 367 BUG_ON(orig->core.fd); 368 BUG_ON(orig->counts); 369 BUG_ON(orig->priv); 370 BUG_ON(orig->per_pkg_mask); 371 372 /* cannot handle BPF objects for now */ 373 if (orig->bpf_obj) 374 return NULL; 375 376 evsel = evsel__new(&orig->core.attr); 377 if (evsel == NULL) 378 return NULL; 379 380 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); 381 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); 382 evsel->core.threads = perf_thread_map__get(orig->core.threads); 383 evsel->core.nr_members = orig->core.nr_members; 384 evsel->core.system_wide = orig->core.system_wide; 385 386 if (orig->name) { 387 evsel->name = strdup(orig->name); 388 if (evsel->name == NULL) 389 goto out_err; 390 } 391 if (orig->group_name) { 392 evsel->group_name = strdup(orig->group_name); 393 if (evsel->group_name == NULL) 394 goto out_err; 395 } 396 if (orig->pmu_name) { 397 evsel->pmu_name = strdup(orig->pmu_name); 398 if (evsel->pmu_name == NULL) 399 goto out_err; 400 } 401 if (orig->filter) { 402 evsel->filter = strdup(orig->filter); 403 if (evsel->filter == NULL) 404 goto out_err; 405 } 406 if (orig->metric_id) { 407 evsel->metric_id = strdup(orig->metric_id); 408 if (evsel->metric_id == NULL) 409 goto out_err; 410 } 411 evsel->cgrp = cgroup__get(orig->cgrp); 412 evsel->tp_format = orig->tp_format; 413 evsel->handler = orig->handler; 414 evsel->core.leader = orig->core.leader; 415 416 evsel->max_events = orig->max_events; 417 evsel->tool_event = orig->tool_event; 418 free((char *)evsel->unit); 419 evsel->unit = strdup(orig->unit); 420 if (evsel->unit == NULL) 421 goto out_err; 422 423 evsel->scale = orig->scale; 424 evsel->snapshot = orig->snapshot; 425 evsel->per_pkg = orig->per_pkg; 426 evsel->percore = orig->percore; 427 evsel->precise_max = orig->precise_max; 428 evsel->use_uncore_alias = orig->use_uncore_alias; 429 evsel->is_libpfm_event = orig->is_libpfm_event; 430 431 evsel->exclude_GH = orig->exclude_GH; 432 evsel->sample_read = orig->sample_read; 433 evsel->auto_merge_stats = orig->auto_merge_stats; 434 evsel->collect_stat = orig->collect_stat; 435 evsel->weak_group = orig->weak_group; 436 evsel->use_config_name = orig->use_config_name; 437 438 if (evsel__copy_config_terms(evsel, orig) < 0) 439 goto out_err; 440 441 return evsel; 442 443 out_err: 444 evsel__delete(evsel); 445 return NULL; 446 } 447 448 /* 449 * Returns pointer with encoded error via <linux/err.h> interface. 450 */ 451 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) 452 { 453 struct evsel *evsel = zalloc(perf_evsel__object.size); 454 int err = -ENOMEM; 455 456 if (evsel == NULL) { 457 goto out_err; 458 } else { 459 struct perf_event_attr attr = { 460 .type = PERF_TYPE_TRACEPOINT, 461 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 462 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 463 }; 464 465 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) 466 goto out_free; 467 468 evsel->tp_format = trace_event__tp_format(sys, name); 469 if (IS_ERR(evsel->tp_format)) { 470 err = PTR_ERR(evsel->tp_format); 471 goto out_free; 472 } 473 474 event_attr_init(&attr); 475 attr.config = evsel->tp_format->id; 476 attr.sample_period = 1; 477 evsel__init(evsel, &attr, idx); 478 } 479 480 return evsel; 481 482 out_free: 483 zfree(&evsel->name); 484 free(evsel); 485 out_err: 486 return ERR_PTR(err); 487 } 488 489 const char *evsel__hw_names[PERF_COUNT_HW_MAX] = { 490 "cycles", 491 "instructions", 492 "cache-references", 493 "cache-misses", 494 "branches", 495 "branch-misses", 496 "bus-cycles", 497 "stalled-cycles-frontend", 498 "stalled-cycles-backend", 499 "ref-cycles", 500 }; 501 502 char *evsel__bpf_counter_events; 503 504 bool evsel__match_bpf_counter_events(const char *name) 505 { 506 int name_len; 507 bool match; 508 char *ptr; 509 510 if (!evsel__bpf_counter_events) 511 return false; 512 513 ptr = strstr(evsel__bpf_counter_events, name); 514 name_len = strlen(name); 515 516 /* check name matches a full token in evsel__bpf_counter_events */ 517 match = (ptr != NULL) && 518 ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) && 519 ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0')); 520 521 return match; 522 } 523 524 static const char *__evsel__hw_name(u64 config) 525 { 526 if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config]) 527 return evsel__hw_names[config]; 528 529 return "unknown-hardware"; 530 } 531 532 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size) 533 { 534 int colon = 0, r = 0; 535 struct perf_event_attr *attr = &evsel->core.attr; 536 bool exclude_guest_default = false; 537 538 #define MOD_PRINT(context, mod) do { \ 539 if (!attr->exclude_##context) { \ 540 if (!colon) colon = ++r; \ 541 r += scnprintf(bf + r, size - r, "%c", mod); \ 542 } } while(0) 543 544 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) { 545 MOD_PRINT(kernel, 'k'); 546 MOD_PRINT(user, 'u'); 547 MOD_PRINT(hv, 'h'); 548 exclude_guest_default = true; 549 } 550 551 if (attr->precise_ip) { 552 if (!colon) 553 colon = ++r; 554 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); 555 exclude_guest_default = true; 556 } 557 558 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) { 559 MOD_PRINT(host, 'H'); 560 MOD_PRINT(guest, 'G'); 561 } 562 #undef MOD_PRINT 563 if (colon) 564 bf[colon - 1] = ':'; 565 return r; 566 } 567 568 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size) 569 { 570 int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); 571 return r + evsel__add_modifiers(evsel, bf + r, size - r); 572 } 573 574 const char *evsel__sw_names[PERF_COUNT_SW_MAX] = { 575 "cpu-clock", 576 "task-clock", 577 "page-faults", 578 "context-switches", 579 "cpu-migrations", 580 "minor-faults", 581 "major-faults", 582 "alignment-faults", 583 "emulation-faults", 584 "dummy", 585 }; 586 587 static const char *__evsel__sw_name(u64 config) 588 { 589 if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config]) 590 return evsel__sw_names[config]; 591 return "unknown-software"; 592 } 593 594 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size) 595 { 596 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); 597 return r + evsel__add_modifiers(evsel, bf + r, size - r); 598 } 599 600 static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) 601 { 602 int r; 603 604 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); 605 606 if (type & HW_BREAKPOINT_R) 607 r += scnprintf(bf + r, size - r, "r"); 608 609 if (type & HW_BREAKPOINT_W) 610 r += scnprintf(bf + r, size - r, "w"); 611 612 if (type & HW_BREAKPOINT_X) 613 r += scnprintf(bf + r, size - r, "x"); 614 615 return r; 616 } 617 618 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size) 619 { 620 struct perf_event_attr *attr = &evsel->core.attr; 621 int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); 622 return r + evsel__add_modifiers(evsel, bf + r, size - r); 623 } 624 625 const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = { 626 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 627 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 628 { "LLC", "L2", }, 629 { "dTLB", "d-tlb", "Data-TLB", }, 630 { "iTLB", "i-tlb", "Instruction-TLB", }, 631 { "branch", "branches", "bpu", "btb", "bpc", }, 632 { "node", }, 633 }; 634 635 const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = { 636 { "load", "loads", "read", }, 637 { "store", "stores", "write", }, 638 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 639 }; 640 641 const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = { 642 { "refs", "Reference", "ops", "access", }, 643 { "misses", "miss", }, 644 }; 645 646 #define C(x) PERF_COUNT_HW_CACHE_##x 647 #define CACHE_READ (1 << C(OP_READ)) 648 #define CACHE_WRITE (1 << C(OP_WRITE)) 649 #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) 650 #define COP(x) (1 << x) 651 652 /* 653 * cache operation stat 654 * L1I : Read and prefetch only 655 * ITLB and BPU : Read-only 656 */ 657 static unsigned long evsel__hw_cache_stat[C(MAX)] = { 658 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 659 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), 660 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 661 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 662 [C(ITLB)] = (CACHE_READ), 663 [C(BPU)] = (CACHE_READ), 664 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 665 }; 666 667 bool evsel__is_cache_op_valid(u8 type, u8 op) 668 { 669 if (evsel__hw_cache_stat[type] & COP(op)) 670 return true; /* valid */ 671 else 672 return false; /* invalid */ 673 } 674 675 int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size) 676 { 677 if (result) { 678 return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0], 679 evsel__hw_cache_op[op][0], 680 evsel__hw_cache_result[result][0]); 681 } 682 683 return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0], 684 evsel__hw_cache_op[op][1]); 685 } 686 687 static int __evsel__hw_cache_name(u64 config, char *bf, size_t size) 688 { 689 u8 op, result, type = (config >> 0) & 0xff; 690 const char *err = "unknown-ext-hardware-cache-type"; 691 692 if (type >= PERF_COUNT_HW_CACHE_MAX) 693 goto out_err; 694 695 op = (config >> 8) & 0xff; 696 err = "unknown-ext-hardware-cache-op"; 697 if (op >= PERF_COUNT_HW_CACHE_OP_MAX) 698 goto out_err; 699 700 result = (config >> 16) & 0xff; 701 err = "unknown-ext-hardware-cache-result"; 702 if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 703 goto out_err; 704 705 err = "invalid-cache"; 706 if (!evsel__is_cache_op_valid(type, op)) 707 goto out_err; 708 709 return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size); 710 out_err: 711 return scnprintf(bf, size, "%s", err); 712 } 713 714 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size) 715 { 716 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); 717 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 718 } 719 720 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size) 721 { 722 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); 723 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 724 } 725 726 static int evsel__tool_name(char *bf, size_t size) 727 { 728 int ret = scnprintf(bf, size, "duration_time"); 729 return ret; 730 } 731 732 const char *evsel__name(struct evsel *evsel) 733 { 734 char bf[128]; 735 736 if (!evsel) 737 goto out_unknown; 738 739 if (evsel->name) 740 return evsel->name; 741 742 switch (evsel->core.attr.type) { 743 case PERF_TYPE_RAW: 744 evsel__raw_name(evsel, bf, sizeof(bf)); 745 break; 746 747 case PERF_TYPE_HARDWARE: 748 evsel__hw_name(evsel, bf, sizeof(bf)); 749 break; 750 751 case PERF_TYPE_HW_CACHE: 752 evsel__hw_cache_name(evsel, bf, sizeof(bf)); 753 break; 754 755 case PERF_TYPE_SOFTWARE: 756 if (evsel->tool_event) 757 evsel__tool_name(bf, sizeof(bf)); 758 else 759 evsel__sw_name(evsel, bf, sizeof(bf)); 760 break; 761 762 case PERF_TYPE_TRACEPOINT: 763 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); 764 break; 765 766 case PERF_TYPE_BREAKPOINT: 767 evsel__bp_name(evsel, bf, sizeof(bf)); 768 break; 769 770 default: 771 scnprintf(bf, sizeof(bf), "unknown attr type: %d", 772 evsel->core.attr.type); 773 break; 774 } 775 776 evsel->name = strdup(bf); 777 778 if (evsel->name) 779 return evsel->name; 780 out_unknown: 781 return "unknown"; 782 } 783 784 const char *evsel__metric_id(const struct evsel *evsel) 785 { 786 if (evsel->metric_id) 787 return evsel->metric_id; 788 789 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event) 790 return "duration_time"; 791 792 return "unknown"; 793 } 794 795 const char *evsel__group_name(struct evsel *evsel) 796 { 797 return evsel->group_name ?: "anon group"; 798 } 799 800 /* 801 * Returns the group details for the specified leader, 802 * with following rules. 803 * 804 * For record -e '{cycles,instructions}' 805 * 'anon group { cycles:u, instructions:u }' 806 * 807 * For record -e 'cycles,instructions' and report --group 808 * 'cycles:u, instructions:u' 809 */ 810 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size) 811 { 812 int ret = 0; 813 struct evsel *pos; 814 const char *group_name = evsel__group_name(evsel); 815 816 if (!evsel->forced_leader) 817 ret = scnprintf(buf, size, "%s { ", group_name); 818 819 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel)); 820 821 for_each_group_member(pos, evsel) 822 ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos)); 823 824 if (!evsel->forced_leader) 825 ret += scnprintf(buf + ret, size - ret, " }"); 826 827 return ret; 828 } 829 830 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 831 struct callchain_param *param) 832 { 833 bool function = evsel__is_function_event(evsel); 834 struct perf_event_attr *attr = &evsel->core.attr; 835 836 evsel__set_sample_bit(evsel, CALLCHAIN); 837 838 attr->sample_max_stack = param->max_stack; 839 840 if (opts->kernel_callchains) 841 attr->exclude_callchain_user = 1; 842 if (opts->user_callchains) 843 attr->exclude_callchain_kernel = 1; 844 if (param->record_mode == CALLCHAIN_LBR) { 845 if (!opts->branch_stack) { 846 if (attr->exclude_user) { 847 pr_warning("LBR callstack option is only available " 848 "to get user callchain information. " 849 "Falling back to framepointers.\n"); 850 } else { 851 evsel__set_sample_bit(evsel, BRANCH_STACK); 852 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER | 853 PERF_SAMPLE_BRANCH_CALL_STACK | 854 PERF_SAMPLE_BRANCH_NO_CYCLES | 855 PERF_SAMPLE_BRANCH_NO_FLAGS | 856 PERF_SAMPLE_BRANCH_HW_INDEX; 857 } 858 } else 859 pr_warning("Cannot use LBR callstack with branch stack. " 860 "Falling back to framepointers.\n"); 861 } 862 863 if (param->record_mode == CALLCHAIN_DWARF) { 864 if (!function) { 865 evsel__set_sample_bit(evsel, REGS_USER); 866 evsel__set_sample_bit(evsel, STACK_USER); 867 if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) { 868 attr->sample_regs_user |= DWARF_MINIMAL_REGS; 869 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, " 870 "specifying a subset with --user-regs may render DWARF unwinding unreliable, " 871 "so the minimal registers set (IP, SP) is explicitly forced.\n"); 872 } else { 873 attr->sample_regs_user |= PERF_REGS_MASK; 874 } 875 attr->sample_stack_user = param->dump_size; 876 attr->exclude_callchain_user = 1; 877 } else { 878 pr_info("Cannot use DWARF unwind for function trace event," 879 " falling back to framepointers.\n"); 880 } 881 } 882 883 if (function) { 884 pr_info("Disabling user space callchains for function trace event.\n"); 885 attr->exclude_callchain_user = 1; 886 } 887 } 888 889 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 890 struct callchain_param *param) 891 { 892 if (param->enabled) 893 return __evsel__config_callchain(evsel, opts, param); 894 } 895 896 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param) 897 { 898 struct perf_event_attr *attr = &evsel->core.attr; 899 900 evsel__reset_sample_bit(evsel, CALLCHAIN); 901 if (param->record_mode == CALLCHAIN_LBR) { 902 evsel__reset_sample_bit(evsel, BRANCH_STACK); 903 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER | 904 PERF_SAMPLE_BRANCH_CALL_STACK | 905 PERF_SAMPLE_BRANCH_HW_INDEX); 906 } 907 if (param->record_mode == CALLCHAIN_DWARF) { 908 evsel__reset_sample_bit(evsel, REGS_USER); 909 evsel__reset_sample_bit(evsel, STACK_USER); 910 } 911 } 912 913 static void evsel__apply_config_terms(struct evsel *evsel, 914 struct record_opts *opts, bool track) 915 { 916 struct evsel_config_term *term; 917 struct list_head *config_terms = &evsel->config_terms; 918 struct perf_event_attr *attr = &evsel->core.attr; 919 /* callgraph default */ 920 struct callchain_param param = { 921 .record_mode = callchain_param.record_mode, 922 }; 923 u32 dump_size = 0; 924 int max_stack = 0; 925 const char *callgraph_buf = NULL; 926 927 list_for_each_entry(term, config_terms, list) { 928 switch (term->type) { 929 case EVSEL__CONFIG_TERM_PERIOD: 930 if (!(term->weak && opts->user_interval != ULLONG_MAX)) { 931 attr->sample_period = term->val.period; 932 attr->freq = 0; 933 evsel__reset_sample_bit(evsel, PERIOD); 934 } 935 break; 936 case EVSEL__CONFIG_TERM_FREQ: 937 if (!(term->weak && opts->user_freq != UINT_MAX)) { 938 attr->sample_freq = term->val.freq; 939 attr->freq = 1; 940 evsel__set_sample_bit(evsel, PERIOD); 941 } 942 break; 943 case EVSEL__CONFIG_TERM_TIME: 944 if (term->val.time) 945 evsel__set_sample_bit(evsel, TIME); 946 else 947 evsel__reset_sample_bit(evsel, TIME); 948 break; 949 case EVSEL__CONFIG_TERM_CALLGRAPH: 950 callgraph_buf = term->val.str; 951 break; 952 case EVSEL__CONFIG_TERM_BRANCH: 953 if (term->val.str && strcmp(term->val.str, "no")) { 954 evsel__set_sample_bit(evsel, BRANCH_STACK); 955 parse_branch_str(term->val.str, 956 &attr->branch_sample_type); 957 } else 958 evsel__reset_sample_bit(evsel, BRANCH_STACK); 959 break; 960 case EVSEL__CONFIG_TERM_STACK_USER: 961 dump_size = term->val.stack_user; 962 break; 963 case EVSEL__CONFIG_TERM_MAX_STACK: 964 max_stack = term->val.max_stack; 965 break; 966 case EVSEL__CONFIG_TERM_MAX_EVENTS: 967 evsel->max_events = term->val.max_events; 968 break; 969 case EVSEL__CONFIG_TERM_INHERIT: 970 /* 971 * attr->inherit should has already been set by 972 * evsel__config. If user explicitly set 973 * inherit using config terms, override global 974 * opt->no_inherit setting. 975 */ 976 attr->inherit = term->val.inherit ? 1 : 0; 977 break; 978 case EVSEL__CONFIG_TERM_OVERWRITE: 979 attr->write_backward = term->val.overwrite ? 1 : 0; 980 break; 981 case EVSEL__CONFIG_TERM_DRV_CFG: 982 break; 983 case EVSEL__CONFIG_TERM_PERCORE: 984 break; 985 case EVSEL__CONFIG_TERM_AUX_OUTPUT: 986 attr->aux_output = term->val.aux_output ? 1 : 0; 987 break; 988 case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE: 989 /* Already applied by auxtrace */ 990 break; 991 case EVSEL__CONFIG_TERM_CFG_CHG: 992 break; 993 default: 994 break; 995 } 996 } 997 998 /* User explicitly set per-event callgraph, clear the old setting and reset. */ 999 if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) { 1000 bool sample_address = false; 1001 1002 if (max_stack) { 1003 param.max_stack = max_stack; 1004 if (callgraph_buf == NULL) 1005 callgraph_buf = "fp"; 1006 } 1007 1008 /* parse callgraph parameters */ 1009 if (callgraph_buf != NULL) { 1010 if (!strcmp(callgraph_buf, "no")) { 1011 param.enabled = false; 1012 param.record_mode = CALLCHAIN_NONE; 1013 } else { 1014 param.enabled = true; 1015 if (parse_callchain_record(callgraph_buf, ¶m)) { 1016 pr_err("per-event callgraph setting for %s failed. " 1017 "Apply callgraph global setting for it\n", 1018 evsel->name); 1019 return; 1020 } 1021 if (param.record_mode == CALLCHAIN_DWARF) 1022 sample_address = true; 1023 } 1024 } 1025 if (dump_size > 0) { 1026 dump_size = round_up(dump_size, sizeof(u64)); 1027 param.dump_size = dump_size; 1028 } 1029 1030 /* If global callgraph set, clear it */ 1031 if (callchain_param.enabled) 1032 evsel__reset_callgraph(evsel, &callchain_param); 1033 1034 /* set perf-event callgraph */ 1035 if (param.enabled) { 1036 if (sample_address) { 1037 evsel__set_sample_bit(evsel, ADDR); 1038 evsel__set_sample_bit(evsel, DATA_SRC); 1039 evsel->core.attr.mmap_data = track; 1040 } 1041 evsel__config_callchain(evsel, opts, ¶m); 1042 } 1043 } 1044 } 1045 1046 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) 1047 { 1048 struct evsel_config_term *term, *found_term = NULL; 1049 1050 list_for_each_entry(term, &evsel->config_terms, list) { 1051 if (term->type == type) 1052 found_term = term; 1053 } 1054 1055 return found_term; 1056 } 1057 1058 void __weak arch_evsel__set_sample_weight(struct evsel *evsel) 1059 { 1060 evsel__set_sample_bit(evsel, WEIGHT); 1061 } 1062 1063 void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused) 1064 { 1065 } 1066 1067 /* 1068 * The enable_on_exec/disabled value strategy: 1069 * 1070 * 1) For any type of traced program: 1071 * - all independent events and group leaders are disabled 1072 * - all group members are enabled 1073 * 1074 * Group members are ruled by group leaders. They need to 1075 * be enabled, because the group scheduling relies on that. 1076 * 1077 * 2) For traced programs executed by perf: 1078 * - all independent events and group leaders have 1079 * enable_on_exec set 1080 * - we don't specifically enable or disable any event during 1081 * the record command 1082 * 1083 * Independent events and group leaders are initially disabled 1084 * and get enabled by exec. Group members are ruled by group 1085 * leaders as stated in 1). 1086 * 1087 * 3) For traced programs attached by perf (pid/tid): 1088 * - we specifically enable or disable all events during 1089 * the record command 1090 * 1091 * When attaching events to already running traced we 1092 * enable/disable events specifically, as there's no 1093 * initial traced exec call. 1094 */ 1095 void evsel__config(struct evsel *evsel, struct record_opts *opts, 1096 struct callchain_param *callchain) 1097 { 1098 struct evsel *leader = evsel__leader(evsel); 1099 struct perf_event_attr *attr = &evsel->core.attr; 1100 int track = evsel->tracking; 1101 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; 1102 1103 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; 1104 attr->inherit = !opts->no_inherit; 1105 attr->write_backward = opts->overwrite ? 1 : 0; 1106 1107 evsel__set_sample_bit(evsel, IP); 1108 evsel__set_sample_bit(evsel, TID); 1109 1110 if (evsel->sample_read) { 1111 evsel__set_sample_bit(evsel, READ); 1112 1113 /* 1114 * We need ID even in case of single event, because 1115 * PERF_SAMPLE_READ process ID specific data. 1116 */ 1117 evsel__set_sample_id(evsel, false); 1118 1119 /* 1120 * Apply group format only if we belong to group 1121 * with more than one members. 1122 */ 1123 if (leader->core.nr_members > 1) { 1124 attr->read_format |= PERF_FORMAT_GROUP; 1125 attr->inherit = 0; 1126 } 1127 } 1128 1129 /* 1130 * We default some events to have a default interval. But keep 1131 * it a weak assumption overridable by the user. 1132 */ 1133 if (!attr->sample_period) { 1134 if (opts->freq) { 1135 attr->freq = 1; 1136 attr->sample_freq = opts->freq; 1137 } else { 1138 attr->sample_period = opts->default_interval; 1139 } 1140 } 1141 /* 1142 * If attr->freq was set (here or earlier), ask for period 1143 * to be sampled. 1144 */ 1145 if (attr->freq) 1146 evsel__set_sample_bit(evsel, PERIOD); 1147 1148 if (opts->no_samples) 1149 attr->sample_freq = 0; 1150 1151 if (opts->inherit_stat) { 1152 evsel->core.attr.read_format |= 1153 PERF_FORMAT_TOTAL_TIME_ENABLED | 1154 PERF_FORMAT_TOTAL_TIME_RUNNING | 1155 PERF_FORMAT_ID; 1156 attr->inherit_stat = 1; 1157 } 1158 1159 if (opts->sample_address) { 1160 evsel__set_sample_bit(evsel, ADDR); 1161 attr->mmap_data = track; 1162 } 1163 1164 /* 1165 * We don't allow user space callchains for function trace 1166 * event, due to issues with page faults while tracing page 1167 * fault handler and its overall trickiness nature. 1168 */ 1169 if (evsel__is_function_event(evsel)) 1170 evsel->core.attr.exclude_callchain_user = 1; 1171 1172 if (callchain && callchain->enabled && !evsel->no_aux_samples) 1173 evsel__config_callchain(evsel, opts, callchain); 1174 1175 if (opts->sample_intr_regs && !evsel->no_aux_samples && 1176 !evsel__is_dummy_event(evsel)) { 1177 attr->sample_regs_intr = opts->sample_intr_regs; 1178 evsel__set_sample_bit(evsel, REGS_INTR); 1179 } 1180 1181 if (opts->sample_user_regs && !evsel->no_aux_samples && 1182 !evsel__is_dummy_event(evsel)) { 1183 attr->sample_regs_user |= opts->sample_user_regs; 1184 evsel__set_sample_bit(evsel, REGS_USER); 1185 } 1186 1187 if (target__has_cpu(&opts->target) || opts->sample_cpu) 1188 evsel__set_sample_bit(evsel, CPU); 1189 1190 /* 1191 * When the user explicitly disabled time don't force it here. 1192 */ 1193 if (opts->sample_time && 1194 (!perf_missing_features.sample_id_all && 1195 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu || 1196 opts->sample_time_set))) 1197 evsel__set_sample_bit(evsel, TIME); 1198 1199 if (opts->raw_samples && !evsel->no_aux_samples) { 1200 evsel__set_sample_bit(evsel, TIME); 1201 evsel__set_sample_bit(evsel, RAW); 1202 evsel__set_sample_bit(evsel, CPU); 1203 } 1204 1205 if (opts->sample_address) 1206 evsel__set_sample_bit(evsel, DATA_SRC); 1207 1208 if (opts->sample_phys_addr) 1209 evsel__set_sample_bit(evsel, PHYS_ADDR); 1210 1211 if (opts->no_buffering) { 1212 attr->watermark = 0; 1213 attr->wakeup_events = 1; 1214 } 1215 if (opts->branch_stack && !evsel->no_aux_samples) { 1216 evsel__set_sample_bit(evsel, BRANCH_STACK); 1217 attr->branch_sample_type = opts->branch_stack; 1218 } 1219 1220 if (opts->sample_weight) 1221 arch_evsel__set_sample_weight(evsel); 1222 1223 attr->task = track; 1224 attr->mmap = track; 1225 attr->mmap2 = track && !perf_missing_features.mmap2; 1226 attr->comm = track; 1227 attr->build_id = track && opts->build_id; 1228 1229 /* 1230 * ksymbol is tracked separately with text poke because it needs to be 1231 * system wide and enabled immediately. 1232 */ 1233 if (!opts->text_poke) 1234 attr->ksymbol = track && !perf_missing_features.ksymbol; 1235 attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf; 1236 1237 if (opts->record_namespaces) 1238 attr->namespaces = track; 1239 1240 if (opts->record_cgroup) { 1241 attr->cgroup = track && !perf_missing_features.cgroup; 1242 evsel__set_sample_bit(evsel, CGROUP); 1243 } 1244 1245 if (opts->sample_data_page_size) 1246 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE); 1247 1248 if (opts->sample_code_page_size) 1249 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE); 1250 1251 if (opts->record_switch_events) 1252 attr->context_switch = track; 1253 1254 if (opts->sample_transaction) 1255 evsel__set_sample_bit(evsel, TRANSACTION); 1256 1257 if (opts->running_time) { 1258 evsel->core.attr.read_format |= 1259 PERF_FORMAT_TOTAL_TIME_ENABLED | 1260 PERF_FORMAT_TOTAL_TIME_RUNNING; 1261 } 1262 1263 /* 1264 * XXX see the function comment above 1265 * 1266 * Disabling only independent events or group leaders, 1267 * keeping group members enabled. 1268 */ 1269 if (evsel__is_group_leader(evsel)) 1270 attr->disabled = 1; 1271 1272 /* 1273 * Setting enable_on_exec for independent events and 1274 * group leaders for traced executed by perf. 1275 */ 1276 if (target__none(&opts->target) && evsel__is_group_leader(evsel) && 1277 !opts->initial_delay) 1278 attr->enable_on_exec = 1; 1279 1280 if (evsel->immediate) { 1281 attr->disabled = 0; 1282 attr->enable_on_exec = 0; 1283 } 1284 1285 clockid = opts->clockid; 1286 if (opts->use_clockid) { 1287 attr->use_clockid = 1; 1288 attr->clockid = opts->clockid; 1289 } 1290 1291 if (evsel->precise_max) 1292 attr->precise_ip = 3; 1293 1294 if (opts->all_user) { 1295 attr->exclude_kernel = 1; 1296 attr->exclude_user = 0; 1297 } 1298 1299 if (opts->all_kernel) { 1300 attr->exclude_kernel = 0; 1301 attr->exclude_user = 1; 1302 } 1303 1304 if (evsel->core.own_cpus || evsel->unit) 1305 evsel->core.attr.read_format |= PERF_FORMAT_ID; 1306 1307 /* 1308 * Apply event specific term settings, 1309 * it overloads any global configuration. 1310 */ 1311 evsel__apply_config_terms(evsel, opts, track); 1312 1313 evsel->ignore_missing_thread = opts->ignore_missing_thread; 1314 1315 /* The --period option takes the precedence. */ 1316 if (opts->period_set) { 1317 if (opts->period) 1318 evsel__set_sample_bit(evsel, PERIOD); 1319 else 1320 evsel__reset_sample_bit(evsel, PERIOD); 1321 } 1322 1323 /* 1324 * A dummy event never triggers any actual counter and therefore 1325 * cannot be used with branch_stack. 1326 * 1327 * For initial_delay, a dummy event is added implicitly. 1328 * The software event will trigger -EOPNOTSUPP error out, 1329 * if BRANCH_STACK bit is set. 1330 */ 1331 if (evsel__is_dummy_event(evsel)) 1332 evsel__reset_sample_bit(evsel, BRANCH_STACK); 1333 } 1334 1335 int evsel__set_filter(struct evsel *evsel, const char *filter) 1336 { 1337 char *new_filter = strdup(filter); 1338 1339 if (new_filter != NULL) { 1340 free(evsel->filter); 1341 evsel->filter = new_filter; 1342 return 0; 1343 } 1344 1345 return -1; 1346 } 1347 1348 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter) 1349 { 1350 char *new_filter; 1351 1352 if (evsel->filter == NULL) 1353 return evsel__set_filter(evsel, filter); 1354 1355 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) { 1356 free(evsel->filter); 1357 evsel->filter = new_filter; 1358 return 0; 1359 } 1360 1361 return -1; 1362 } 1363 1364 int evsel__append_tp_filter(struct evsel *evsel, const char *filter) 1365 { 1366 return evsel__append_filter(evsel, "(%s) && (%s)", filter); 1367 } 1368 1369 int evsel__append_addr_filter(struct evsel *evsel, const char *filter) 1370 { 1371 return evsel__append_filter(evsel, "%s,%s", filter); 1372 } 1373 1374 /* Caller has to clear disabled after going through all CPUs. */ 1375 int evsel__enable_cpu(struct evsel *evsel, int cpu) 1376 { 1377 return perf_evsel__enable_cpu(&evsel->core, cpu); 1378 } 1379 1380 int evsel__enable(struct evsel *evsel) 1381 { 1382 int err = perf_evsel__enable(&evsel->core); 1383 1384 if (!err) 1385 evsel->disabled = false; 1386 return err; 1387 } 1388 1389 /* Caller has to set disabled after going through all CPUs. */ 1390 int evsel__disable_cpu(struct evsel *evsel, int cpu) 1391 { 1392 return perf_evsel__disable_cpu(&evsel->core, cpu); 1393 } 1394 1395 int evsel__disable(struct evsel *evsel) 1396 { 1397 int err = perf_evsel__disable(&evsel->core); 1398 /* 1399 * We mark it disabled here so that tools that disable a event can 1400 * ignore events after they disable it. I.e. the ring buffer may have 1401 * already a few more events queued up before the kernel got the stop 1402 * request. 1403 */ 1404 if (!err) 1405 evsel->disabled = true; 1406 1407 return err; 1408 } 1409 1410 void free_config_terms(struct list_head *config_terms) 1411 { 1412 struct evsel_config_term *term, *h; 1413 1414 list_for_each_entry_safe(term, h, config_terms, list) { 1415 list_del_init(&term->list); 1416 if (term->free_str) 1417 zfree(&term->val.str); 1418 free(term); 1419 } 1420 } 1421 1422 static void evsel__free_config_terms(struct evsel *evsel) 1423 { 1424 free_config_terms(&evsel->config_terms); 1425 } 1426 1427 void evsel__exit(struct evsel *evsel) 1428 { 1429 assert(list_empty(&evsel->core.node)); 1430 assert(evsel->evlist == NULL); 1431 bpf_counter__destroy(evsel); 1432 evsel__free_counts(evsel); 1433 perf_evsel__free_fd(&evsel->core); 1434 perf_evsel__free_id(&evsel->core); 1435 evsel__free_config_terms(evsel); 1436 cgroup__put(evsel->cgrp); 1437 perf_cpu_map__put(evsel->core.cpus); 1438 perf_cpu_map__put(evsel->core.own_cpus); 1439 perf_thread_map__put(evsel->core.threads); 1440 zfree(&evsel->group_name); 1441 zfree(&evsel->name); 1442 zfree(&evsel->pmu_name); 1443 zfree(&evsel->unit); 1444 zfree(&evsel->metric_id); 1445 evsel__zero_per_pkg(evsel); 1446 hashmap__free(evsel->per_pkg_mask); 1447 evsel->per_pkg_mask = NULL; 1448 zfree(&evsel->metric_events); 1449 perf_evsel__object.fini(evsel); 1450 } 1451 1452 void evsel__delete(struct evsel *evsel) 1453 { 1454 evsel__exit(evsel); 1455 free(evsel); 1456 } 1457 1458 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, 1459 struct perf_counts_values *count) 1460 { 1461 struct perf_counts_values tmp; 1462 1463 if (!evsel->prev_raw_counts) 1464 return; 1465 1466 if (cpu == -1) { 1467 tmp = evsel->prev_raw_counts->aggr; 1468 evsel->prev_raw_counts->aggr = *count; 1469 } else { 1470 tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); 1471 *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; 1472 } 1473 1474 count->val = count->val - tmp.val; 1475 count->ena = count->ena - tmp.ena; 1476 count->run = count->run - tmp.run; 1477 } 1478 1479 static int evsel__read_one(struct evsel *evsel, int cpu, int thread) 1480 { 1481 struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread); 1482 1483 return perf_evsel__read(&evsel->core, cpu, thread, count); 1484 } 1485 1486 static void evsel__set_count(struct evsel *counter, int cpu, int thread, u64 val, u64 ena, u64 run) 1487 { 1488 struct perf_counts_values *count; 1489 1490 count = perf_counts(counter->counts, cpu, thread); 1491 1492 count->val = val; 1493 count->ena = ena; 1494 count->run = run; 1495 1496 perf_counts__set_loaded(counter->counts, cpu, thread, true); 1497 } 1498 1499 static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, u64 *data) 1500 { 1501 u64 read_format = leader->core.attr.read_format; 1502 struct sample_read_value *v; 1503 u64 nr, ena = 0, run = 0, i; 1504 1505 nr = *data++; 1506 1507 if (nr != (u64) leader->core.nr_members) 1508 return -EINVAL; 1509 1510 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1511 ena = *data++; 1512 1513 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1514 run = *data++; 1515 1516 v = (struct sample_read_value *) data; 1517 1518 evsel__set_count(leader, cpu, thread, v[0].value, ena, run); 1519 1520 for (i = 1; i < nr; i++) { 1521 struct evsel *counter; 1522 1523 counter = evlist__id2evsel(leader->evlist, v[i].id); 1524 if (!counter) 1525 return -EINVAL; 1526 1527 evsel__set_count(counter, cpu, thread, v[i].value, ena, run); 1528 } 1529 1530 return 0; 1531 } 1532 1533 static int evsel__read_group(struct evsel *leader, int cpu, int thread) 1534 { 1535 struct perf_stat_evsel *ps = leader->stats; 1536 u64 read_format = leader->core.attr.read_format; 1537 int size = perf_evsel__read_size(&leader->core); 1538 u64 *data = ps->group_data; 1539 1540 if (!(read_format & PERF_FORMAT_ID)) 1541 return -EINVAL; 1542 1543 if (!evsel__is_group_leader(leader)) 1544 return -EINVAL; 1545 1546 if (!data) { 1547 data = zalloc(size); 1548 if (!data) 1549 return -ENOMEM; 1550 1551 ps->group_data = data; 1552 } 1553 1554 if (FD(leader, cpu, thread) < 0) 1555 return -EINVAL; 1556 1557 if (readn(FD(leader, cpu, thread), data, size) <= 0) 1558 return -errno; 1559 1560 return evsel__process_group_data(leader, cpu, thread, data); 1561 } 1562 1563 int evsel__read_counter(struct evsel *evsel, int cpu, int thread) 1564 { 1565 u64 read_format = evsel->core.attr.read_format; 1566 1567 if (read_format & PERF_FORMAT_GROUP) 1568 return evsel__read_group(evsel, cpu, thread); 1569 1570 return evsel__read_one(evsel, cpu, thread); 1571 } 1572 1573 int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale) 1574 { 1575 struct perf_counts_values count; 1576 size_t nv = scale ? 3 : 1; 1577 1578 if (FD(evsel, cpu, thread) < 0) 1579 return -EINVAL; 1580 1581 if (evsel->counts == NULL && evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) 1582 return -ENOMEM; 1583 1584 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0) 1585 return -errno; 1586 1587 evsel__compute_deltas(evsel, cpu, thread, &count); 1588 perf_counts_values__scale(&count, scale, NULL); 1589 *perf_counts(evsel->counts, cpu, thread) = count; 1590 return 0; 1591 } 1592 1593 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, 1594 int cpu) 1595 { 1596 int cpuid; 1597 1598 cpuid = perf_cpu_map__cpu(evsel->core.cpus, cpu); 1599 return perf_cpu_map__idx(other->core.cpus, cpuid); 1600 } 1601 1602 static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu) 1603 { 1604 struct evsel *leader = evsel__leader(evsel); 1605 1606 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || 1607 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { 1608 return evsel__match_other_cpu(evsel, leader, cpu); 1609 } 1610 1611 return cpu; 1612 } 1613 1614 static int get_group_fd(struct evsel *evsel, int cpu, int thread) 1615 { 1616 struct evsel *leader = evsel__leader(evsel); 1617 int fd; 1618 1619 if (evsel__is_group_leader(evsel)) 1620 return -1; 1621 1622 /* 1623 * Leader must be already processed/open, 1624 * if not it's a bug. 1625 */ 1626 BUG_ON(!leader->core.fd); 1627 1628 cpu = evsel__hybrid_group_cpu(evsel, cpu); 1629 if (cpu == -1) 1630 return -1; 1631 1632 fd = FD(leader, cpu, thread); 1633 BUG_ON(fd == -1); 1634 1635 return fd; 1636 } 1637 1638 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx) 1639 { 1640 for (int cpu = 0; cpu < nr_cpus; cpu++) 1641 for (int thread = thread_idx; thread < nr_threads - 1; thread++) 1642 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1); 1643 } 1644 1645 static int update_fds(struct evsel *evsel, 1646 int nr_cpus, int cpu_idx, 1647 int nr_threads, int thread_idx) 1648 { 1649 struct evsel *pos; 1650 1651 if (cpu_idx >= nr_cpus || thread_idx >= nr_threads) 1652 return -EINVAL; 1653 1654 evlist__for_each_entry(evsel->evlist, pos) { 1655 nr_cpus = pos != evsel ? nr_cpus : cpu_idx; 1656 1657 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx); 1658 1659 /* 1660 * Since fds for next evsel has not been created, 1661 * there is no need to iterate whole event list. 1662 */ 1663 if (pos == evsel) 1664 break; 1665 } 1666 return 0; 1667 } 1668 1669 bool evsel__ignore_missing_thread(struct evsel *evsel, 1670 int nr_cpus, int cpu, 1671 struct perf_thread_map *threads, 1672 int thread, int err) 1673 { 1674 pid_t ignore_pid = perf_thread_map__pid(threads, thread); 1675 1676 if (!evsel->ignore_missing_thread) 1677 return false; 1678 1679 /* The system wide setup does not work with threads. */ 1680 if (evsel->core.system_wide) 1681 return false; 1682 1683 /* The -ESRCH is perf event syscall errno for pid's not found. */ 1684 if (err != -ESRCH) 1685 return false; 1686 1687 /* If there's only one thread, let it fail. */ 1688 if (threads->nr == 1) 1689 return false; 1690 1691 /* 1692 * We should remove fd for missing_thread first 1693 * because thread_map__remove() will decrease threads->nr. 1694 */ 1695 if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread)) 1696 return false; 1697 1698 if (thread_map__remove(threads, thread)) 1699 return false; 1700 1701 pr_warning("WARNING: Ignored open failure for pid %d\n", 1702 ignore_pid); 1703 return true; 1704 } 1705 1706 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1707 void *priv __maybe_unused) 1708 { 1709 return fprintf(fp, " %-32s %s\n", name, val); 1710 } 1711 1712 static void display_attr(struct perf_event_attr *attr) 1713 { 1714 if (verbose >= 2 || debug_peo_args) { 1715 fprintf(stderr, "%.60s\n", graph_dotted_line); 1716 fprintf(stderr, "perf_event_attr:\n"); 1717 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL); 1718 fprintf(stderr, "%.60s\n", graph_dotted_line); 1719 } 1720 } 1721 1722 bool evsel__precise_ip_fallback(struct evsel *evsel) 1723 { 1724 /* Do not try less precise if not requested. */ 1725 if (!evsel->precise_max) 1726 return false; 1727 1728 /* 1729 * We tried all the precise_ip values, and it's 1730 * still failing, so leave it to standard fallback. 1731 */ 1732 if (!evsel->core.attr.precise_ip) { 1733 evsel->core.attr.precise_ip = evsel->precise_ip_original; 1734 return false; 1735 } 1736 1737 if (!evsel->precise_ip_original) 1738 evsel->precise_ip_original = evsel->core.attr.precise_ip; 1739 1740 evsel->core.attr.precise_ip--; 1741 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); 1742 display_attr(&evsel->core.attr); 1743 return true; 1744 } 1745 1746 static struct perf_cpu_map *empty_cpu_map; 1747 static struct perf_thread_map *empty_thread_map; 1748 1749 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1750 struct perf_thread_map *threads) 1751 { 1752 int nthreads; 1753 1754 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || 1755 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) 1756 return -EINVAL; 1757 1758 if (cpus == NULL) { 1759 if (empty_cpu_map == NULL) { 1760 empty_cpu_map = perf_cpu_map__dummy_new(); 1761 if (empty_cpu_map == NULL) 1762 return -ENOMEM; 1763 } 1764 1765 cpus = empty_cpu_map; 1766 } 1767 1768 if (threads == NULL) { 1769 if (empty_thread_map == NULL) { 1770 empty_thread_map = thread_map__new_by_tid(-1); 1771 if (empty_thread_map == NULL) 1772 return -ENOMEM; 1773 } 1774 1775 threads = empty_thread_map; 1776 } 1777 1778 if (evsel->core.system_wide) 1779 nthreads = 1; 1780 else 1781 nthreads = threads->nr; 1782 1783 if (evsel->core.fd == NULL && 1784 perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) 1785 return -ENOMEM; 1786 1787 evsel->open_flags = PERF_FLAG_FD_CLOEXEC; 1788 if (evsel->cgrp) 1789 evsel->open_flags |= PERF_FLAG_PID_CGROUP; 1790 1791 return 0; 1792 } 1793 1794 static void evsel__disable_missing_features(struct evsel *evsel) 1795 { 1796 if (perf_missing_features.weight_struct) { 1797 evsel__set_sample_bit(evsel, WEIGHT); 1798 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); 1799 } 1800 if (perf_missing_features.clockid_wrong) 1801 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ 1802 if (perf_missing_features.clockid) { 1803 evsel->core.attr.use_clockid = 0; 1804 evsel->core.attr.clockid = 0; 1805 } 1806 if (perf_missing_features.cloexec) 1807 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; 1808 if (perf_missing_features.mmap2) 1809 evsel->core.attr.mmap2 = 0; 1810 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest) 1811 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; 1812 if (perf_missing_features.lbr_flags) 1813 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1814 PERF_SAMPLE_BRANCH_NO_CYCLES); 1815 if (perf_missing_features.group_read && evsel->core.attr.inherit) 1816 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); 1817 if (perf_missing_features.ksymbol) 1818 evsel->core.attr.ksymbol = 0; 1819 if (perf_missing_features.bpf) 1820 evsel->core.attr.bpf_event = 0; 1821 if (perf_missing_features.branch_hw_idx) 1822 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; 1823 if (perf_missing_features.sample_id_all) 1824 evsel->core.attr.sample_id_all = 0; 1825 } 1826 1827 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1828 struct perf_thread_map *threads) 1829 { 1830 int err; 1831 1832 err = __evsel__prepare_open(evsel, cpus, threads); 1833 if (err) 1834 return err; 1835 1836 evsel__disable_missing_features(evsel); 1837 1838 return err; 1839 } 1840 1841 bool evsel__detect_missing_features(struct evsel *evsel) 1842 { 1843 /* 1844 * Must probe features in the order they were added to the 1845 * perf_event_attr interface. 1846 */ 1847 if (!perf_missing_features.weight_struct && 1848 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { 1849 perf_missing_features.weight_struct = true; 1850 pr_debug2("switching off weight struct support\n"); 1851 return true; 1852 } else if (!perf_missing_features.code_page_size && 1853 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { 1854 perf_missing_features.code_page_size = true; 1855 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n"); 1856 return false; 1857 } else if (!perf_missing_features.data_page_size && 1858 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { 1859 perf_missing_features.data_page_size = true; 1860 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n"); 1861 return false; 1862 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { 1863 perf_missing_features.cgroup = true; 1864 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n"); 1865 return false; 1866 } else if (!perf_missing_features.branch_hw_idx && 1867 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { 1868 perf_missing_features.branch_hw_idx = true; 1869 pr_debug2("switching off branch HW index support\n"); 1870 return true; 1871 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { 1872 perf_missing_features.aux_output = true; 1873 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n"); 1874 return false; 1875 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { 1876 perf_missing_features.bpf = true; 1877 pr_debug2_peo("switching off bpf_event\n"); 1878 return true; 1879 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { 1880 perf_missing_features.ksymbol = true; 1881 pr_debug2_peo("switching off ksymbol\n"); 1882 return true; 1883 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { 1884 perf_missing_features.write_backward = true; 1885 pr_debug2_peo("switching off write_backward\n"); 1886 return false; 1887 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { 1888 perf_missing_features.clockid_wrong = true; 1889 pr_debug2_peo("switching off clockid\n"); 1890 return true; 1891 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { 1892 perf_missing_features.clockid = true; 1893 pr_debug2_peo("switching off use_clockid\n"); 1894 return true; 1895 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { 1896 perf_missing_features.cloexec = true; 1897 pr_debug2_peo("switching off cloexec flag\n"); 1898 return true; 1899 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { 1900 perf_missing_features.mmap2 = true; 1901 pr_debug2_peo("switching off mmap2\n"); 1902 return true; 1903 } else if ((evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) && 1904 (evsel->pmu == NULL || evsel->pmu->missing_features.exclude_guest)) { 1905 if (evsel->pmu == NULL) { 1906 evsel->pmu = evsel__find_pmu(evsel); 1907 if (evsel->pmu) 1908 evsel->pmu->missing_features.exclude_guest = true; 1909 else { 1910 /* we cannot find PMU, disable attrs now */ 1911 evsel->core.attr.exclude_host = false; 1912 evsel->core.attr.exclude_guest = false; 1913 } 1914 } 1915 1916 if (evsel->exclude_GH) { 1917 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n"); 1918 return false; 1919 } 1920 if (!perf_missing_features.exclude_guest) { 1921 perf_missing_features.exclude_guest = true; 1922 pr_debug2_peo("switching off exclude_guest, exclude_host\n"); 1923 } 1924 return true; 1925 } else if (!perf_missing_features.sample_id_all) { 1926 perf_missing_features.sample_id_all = true; 1927 pr_debug2_peo("switching off sample_id_all\n"); 1928 return true; 1929 } else if (!perf_missing_features.lbr_flags && 1930 (evsel->core.attr.branch_sample_type & 1931 (PERF_SAMPLE_BRANCH_NO_CYCLES | 1932 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1933 perf_missing_features.lbr_flags = true; 1934 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n"); 1935 return true; 1936 } else if (!perf_missing_features.group_read && 1937 evsel->core.attr.inherit && 1938 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && 1939 evsel__is_group_leader(evsel)) { 1940 perf_missing_features.group_read = true; 1941 pr_debug2_peo("switching off group read\n"); 1942 return true; 1943 } else { 1944 return false; 1945 } 1946 } 1947 1948 bool evsel__increase_rlimit(enum rlimit_action *set_rlimit) 1949 { 1950 int old_errno; 1951 struct rlimit l; 1952 1953 if (*set_rlimit < INCREASED_MAX) { 1954 old_errno = errno; 1955 1956 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 1957 if (*set_rlimit == NO_CHANGE) { 1958 l.rlim_cur = l.rlim_max; 1959 } else { 1960 l.rlim_cur = l.rlim_max + 1000; 1961 l.rlim_max = l.rlim_cur; 1962 } 1963 if (setrlimit(RLIMIT_NOFILE, &l) == 0) { 1964 (*set_rlimit) += 1; 1965 errno = old_errno; 1966 return true; 1967 } 1968 } 1969 errno = old_errno; 1970 } 1971 1972 return false; 1973 } 1974 1975 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, 1976 struct perf_thread_map *threads, 1977 int start_cpu, int end_cpu) 1978 { 1979 int cpu, thread, nthreads; 1980 int pid = -1, err, old_errno; 1981 enum rlimit_action set_rlimit = NO_CHANGE; 1982 1983 err = __evsel__prepare_open(evsel, cpus, threads); 1984 if (err) 1985 return err; 1986 1987 if (cpus == NULL) 1988 cpus = empty_cpu_map; 1989 1990 if (threads == NULL) 1991 threads = empty_thread_map; 1992 1993 if (evsel->core.system_wide) 1994 nthreads = 1; 1995 else 1996 nthreads = threads->nr; 1997 1998 if (evsel->cgrp) 1999 pid = evsel->cgrp->fd; 2000 2001 fallback_missing_features: 2002 evsel__disable_missing_features(evsel); 2003 2004 display_attr(&evsel->core.attr); 2005 2006 for (cpu = start_cpu; cpu < end_cpu; cpu++) { 2007 2008 for (thread = 0; thread < nthreads; thread++) { 2009 int fd, group_fd; 2010 retry_open: 2011 if (thread >= nthreads) 2012 break; 2013 2014 if (!evsel->cgrp && !evsel->core.system_wide) 2015 pid = perf_thread_map__pid(threads, thread); 2016 2017 group_fd = get_group_fd(evsel, cpu, thread); 2018 2019 test_attr__ready(); 2020 2021 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", 2022 pid, cpus->map[cpu], group_fd, evsel->open_flags); 2023 2024 fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu], 2025 group_fd, evsel->open_flags); 2026 2027 FD(evsel, cpu, thread) = fd; 2028 2029 if (fd < 0) { 2030 err = -errno; 2031 2032 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", 2033 err); 2034 goto try_fallback; 2035 } 2036 2037 bpf_counter__install_pe(evsel, cpu, fd); 2038 2039 if (unlikely(test_attr__enabled)) { 2040 test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], 2041 fd, group_fd, evsel->open_flags); 2042 } 2043 2044 pr_debug2_peo(" = %d\n", fd); 2045 2046 if (evsel->bpf_fd >= 0) { 2047 int evt_fd = fd; 2048 int bpf_fd = evsel->bpf_fd; 2049 2050 err = ioctl(evt_fd, 2051 PERF_EVENT_IOC_SET_BPF, 2052 bpf_fd); 2053 if (err && errno != EEXIST) { 2054 pr_err("failed to attach bpf fd %d: %s\n", 2055 bpf_fd, strerror(errno)); 2056 err = -EINVAL; 2057 goto out_close; 2058 } 2059 } 2060 2061 set_rlimit = NO_CHANGE; 2062 2063 /* 2064 * If we succeeded but had to kill clockid, fail and 2065 * have evsel__open_strerror() print us a nice error. 2066 */ 2067 if (perf_missing_features.clockid || 2068 perf_missing_features.clockid_wrong) { 2069 err = -EINVAL; 2070 goto out_close; 2071 } 2072 } 2073 } 2074 2075 return 0; 2076 2077 try_fallback: 2078 if (evsel__precise_ip_fallback(evsel)) 2079 goto retry_open; 2080 2081 if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { 2082 /* We just removed 1 thread, so lower the upper nthreads limit. */ 2083 nthreads--; 2084 2085 /* ... and pretend like nothing have happened. */ 2086 err = 0; 2087 goto retry_open; 2088 } 2089 /* 2090 * perf stat needs between 5 and 22 fds per CPU. When we run out 2091 * of them try to increase the limits. 2092 */ 2093 if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit)) 2094 goto retry_open; 2095 2096 if (err != -EINVAL || cpu > 0 || thread > 0) 2097 goto out_close; 2098 2099 if (evsel__detect_missing_features(evsel)) 2100 goto fallback_missing_features; 2101 out_close: 2102 if (err) 2103 threads->err_thread = thread; 2104 2105 old_errno = errno; 2106 do { 2107 while (--thread >= 0) { 2108 if (FD(evsel, cpu, thread) >= 0) 2109 close(FD(evsel, cpu, thread)); 2110 FD(evsel, cpu, thread) = -1; 2111 } 2112 thread = nthreads; 2113 } while (--cpu >= 0); 2114 errno = old_errno; 2115 return err; 2116 } 2117 2118 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, 2119 struct perf_thread_map *threads) 2120 { 2121 return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1); 2122 } 2123 2124 void evsel__close(struct evsel *evsel) 2125 { 2126 perf_evsel__close(&evsel->core); 2127 perf_evsel__free_id(&evsel->core); 2128 } 2129 2130 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu) 2131 { 2132 if (cpu == -1) 2133 return evsel__open_cpu(evsel, cpus, NULL, 0, 2134 cpus ? cpus->nr : 1); 2135 2136 return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1); 2137 } 2138 2139 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) 2140 { 2141 return evsel__open(evsel, NULL, threads); 2142 } 2143 2144 static int perf_evsel__parse_id_sample(const struct evsel *evsel, 2145 const union perf_event *event, 2146 struct perf_sample *sample) 2147 { 2148 u64 type = evsel->core.attr.sample_type; 2149 const __u64 *array = event->sample.array; 2150 bool swapped = evsel->needs_swap; 2151 union u64_swap u; 2152 2153 array += ((event->header.size - 2154 sizeof(event->header)) / sizeof(u64)) - 1; 2155 2156 if (type & PERF_SAMPLE_IDENTIFIER) { 2157 sample->id = *array; 2158 array--; 2159 } 2160 2161 if (type & PERF_SAMPLE_CPU) { 2162 u.val64 = *array; 2163 if (swapped) { 2164 /* undo swap of u64, then swap on individual u32s */ 2165 u.val64 = bswap_64(u.val64); 2166 u.val32[0] = bswap_32(u.val32[0]); 2167 } 2168 2169 sample->cpu = u.val32[0]; 2170 array--; 2171 } 2172 2173 if (type & PERF_SAMPLE_STREAM_ID) { 2174 sample->stream_id = *array; 2175 array--; 2176 } 2177 2178 if (type & PERF_SAMPLE_ID) { 2179 sample->id = *array; 2180 array--; 2181 } 2182 2183 if (type & PERF_SAMPLE_TIME) { 2184 sample->time = *array; 2185 array--; 2186 } 2187 2188 if (type & PERF_SAMPLE_TID) { 2189 u.val64 = *array; 2190 if (swapped) { 2191 /* undo swap of u64, then swap on individual u32s */ 2192 u.val64 = bswap_64(u.val64); 2193 u.val32[0] = bswap_32(u.val32[0]); 2194 u.val32[1] = bswap_32(u.val32[1]); 2195 } 2196 2197 sample->pid = u.val32[0]; 2198 sample->tid = u.val32[1]; 2199 array--; 2200 } 2201 2202 return 0; 2203 } 2204 2205 static inline bool overflow(const void *endp, u16 max_size, const void *offset, 2206 u64 size) 2207 { 2208 return size > max_size || offset + size > endp; 2209 } 2210 2211 #define OVERFLOW_CHECK(offset, size, max_size) \ 2212 do { \ 2213 if (overflow(endp, (max_size), (offset), (size))) \ 2214 return -EFAULT; \ 2215 } while (0) 2216 2217 #define OVERFLOW_CHECK_u64(offset) \ 2218 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) 2219 2220 static int 2221 perf_event__check_size(union perf_event *event, unsigned int sample_size) 2222 { 2223 /* 2224 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes 2225 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to 2226 * check the format does not go past the end of the event. 2227 */ 2228 if (sample_size + sizeof(event->header) > event->header.size) 2229 return -EFAULT; 2230 2231 return 0; 2232 } 2233 2234 void __weak arch_perf_parse_sample_weight(struct perf_sample *data, 2235 const __u64 *array, 2236 u64 type __maybe_unused) 2237 { 2238 data->weight = *array; 2239 } 2240 2241 u64 evsel__bitfield_swap_branch_flags(u64 value) 2242 { 2243 u64 new_val = 0; 2244 2245 /* 2246 * branch_flags 2247 * union { 2248 * u64 values; 2249 * struct { 2250 * mispred:1 //target mispredicted 2251 * predicted:1 //target predicted 2252 * in_tx:1 //in transaction 2253 * abort:1 //transaction abort 2254 * cycles:16 //cycle count to last branch 2255 * type:4 //branch type 2256 * reserved:40 2257 * } 2258 * } 2259 * 2260 * Avoid bswap64() the entire branch_flag.value, 2261 * as it has variable bit-field sizes. Instead the 2262 * macro takes the bit-field position/size, 2263 * swaps it based on the host endianness. 2264 * 2265 * tep_is_bigendian() is used here instead of 2266 * bigendian() to avoid python test fails. 2267 */ 2268 if (tep_is_bigendian()) { 2269 new_val = bitfield_swap(value, 0, 1); 2270 new_val |= bitfield_swap(value, 1, 1); 2271 new_val |= bitfield_swap(value, 2, 1); 2272 new_val |= bitfield_swap(value, 3, 1); 2273 new_val |= bitfield_swap(value, 4, 16); 2274 new_val |= bitfield_swap(value, 20, 4); 2275 new_val |= bitfield_swap(value, 24, 40); 2276 } else { 2277 new_val = bitfield_swap(value, 63, 1); 2278 new_val |= bitfield_swap(value, 62, 1); 2279 new_val |= bitfield_swap(value, 61, 1); 2280 new_val |= bitfield_swap(value, 60, 1); 2281 new_val |= bitfield_swap(value, 44, 16); 2282 new_val |= bitfield_swap(value, 40, 4); 2283 new_val |= bitfield_swap(value, 0, 40); 2284 } 2285 2286 return new_val; 2287 } 2288 2289 int evsel__parse_sample(struct evsel *evsel, union perf_event *event, 2290 struct perf_sample *data) 2291 { 2292 u64 type = evsel->core.attr.sample_type; 2293 bool swapped = evsel->needs_swap; 2294 const __u64 *array; 2295 u16 max_size = event->header.size; 2296 const void *endp = (void *)event + max_size; 2297 u64 sz; 2298 2299 /* 2300 * used for cross-endian analysis. See git commit 65014ab3 2301 * for why this goofiness is needed. 2302 */ 2303 union u64_swap u; 2304 2305 memset(data, 0, sizeof(*data)); 2306 data->cpu = data->pid = data->tid = -1; 2307 data->stream_id = data->id = data->time = -1ULL; 2308 data->period = evsel->core.attr.sample_period; 2309 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 2310 data->misc = event->header.misc; 2311 data->id = -1ULL; 2312 data->data_src = PERF_MEM_DATA_SRC_NONE; 2313 2314 if (event->header.type != PERF_RECORD_SAMPLE) { 2315 if (!evsel->core.attr.sample_id_all) 2316 return 0; 2317 return perf_evsel__parse_id_sample(evsel, event, data); 2318 } 2319 2320 array = event->sample.array; 2321 2322 if (perf_event__check_size(event, evsel->sample_size)) 2323 return -EFAULT; 2324 2325 if (type & PERF_SAMPLE_IDENTIFIER) { 2326 data->id = *array; 2327 array++; 2328 } 2329 2330 if (type & PERF_SAMPLE_IP) { 2331 data->ip = *array; 2332 array++; 2333 } 2334 2335 if (type & PERF_SAMPLE_TID) { 2336 u.val64 = *array; 2337 if (swapped) { 2338 /* undo swap of u64, then swap on individual u32s */ 2339 u.val64 = bswap_64(u.val64); 2340 u.val32[0] = bswap_32(u.val32[0]); 2341 u.val32[1] = bswap_32(u.val32[1]); 2342 } 2343 2344 data->pid = u.val32[0]; 2345 data->tid = u.val32[1]; 2346 array++; 2347 } 2348 2349 if (type & PERF_SAMPLE_TIME) { 2350 data->time = *array; 2351 array++; 2352 } 2353 2354 if (type & PERF_SAMPLE_ADDR) { 2355 data->addr = *array; 2356 array++; 2357 } 2358 2359 if (type & PERF_SAMPLE_ID) { 2360 data->id = *array; 2361 array++; 2362 } 2363 2364 if (type & PERF_SAMPLE_STREAM_ID) { 2365 data->stream_id = *array; 2366 array++; 2367 } 2368 2369 if (type & PERF_SAMPLE_CPU) { 2370 2371 u.val64 = *array; 2372 if (swapped) { 2373 /* undo swap of u64, then swap on individual u32s */ 2374 u.val64 = bswap_64(u.val64); 2375 u.val32[0] = bswap_32(u.val32[0]); 2376 } 2377 2378 data->cpu = u.val32[0]; 2379 array++; 2380 } 2381 2382 if (type & PERF_SAMPLE_PERIOD) { 2383 data->period = *array; 2384 array++; 2385 } 2386 2387 if (type & PERF_SAMPLE_READ) { 2388 u64 read_format = evsel->core.attr.read_format; 2389 2390 OVERFLOW_CHECK_u64(array); 2391 if (read_format & PERF_FORMAT_GROUP) 2392 data->read.group.nr = *array; 2393 else 2394 data->read.one.value = *array; 2395 2396 array++; 2397 2398 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2399 OVERFLOW_CHECK_u64(array); 2400 data->read.time_enabled = *array; 2401 array++; 2402 } 2403 2404 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2405 OVERFLOW_CHECK_u64(array); 2406 data->read.time_running = *array; 2407 array++; 2408 } 2409 2410 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2411 if (read_format & PERF_FORMAT_GROUP) { 2412 const u64 max_group_nr = UINT64_MAX / 2413 sizeof(struct sample_read_value); 2414 2415 if (data->read.group.nr > max_group_nr) 2416 return -EFAULT; 2417 sz = data->read.group.nr * 2418 sizeof(struct sample_read_value); 2419 OVERFLOW_CHECK(array, sz, max_size); 2420 data->read.group.values = 2421 (struct sample_read_value *)array; 2422 array = (void *)array + sz; 2423 } else { 2424 OVERFLOW_CHECK_u64(array); 2425 data->read.one.id = *array; 2426 array++; 2427 } 2428 } 2429 2430 if (type & PERF_SAMPLE_CALLCHAIN) { 2431 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); 2432 2433 OVERFLOW_CHECK_u64(array); 2434 data->callchain = (struct ip_callchain *)array++; 2435 if (data->callchain->nr > max_callchain_nr) 2436 return -EFAULT; 2437 sz = data->callchain->nr * sizeof(u64); 2438 OVERFLOW_CHECK(array, sz, max_size); 2439 array = (void *)array + sz; 2440 } 2441 2442 if (type & PERF_SAMPLE_RAW) { 2443 OVERFLOW_CHECK_u64(array); 2444 u.val64 = *array; 2445 2446 /* 2447 * Undo swap of u64, then swap on individual u32s, 2448 * get the size of the raw area and undo all of the 2449 * swap. The pevent interface handles endianness by 2450 * itself. 2451 */ 2452 if (swapped) { 2453 u.val64 = bswap_64(u.val64); 2454 u.val32[0] = bswap_32(u.val32[0]); 2455 u.val32[1] = bswap_32(u.val32[1]); 2456 } 2457 data->raw_size = u.val32[0]; 2458 2459 /* 2460 * The raw data is aligned on 64bits including the 2461 * u32 size, so it's safe to use mem_bswap_64. 2462 */ 2463 if (swapped) 2464 mem_bswap_64((void *) array, data->raw_size); 2465 2466 array = (void *)array + sizeof(u32); 2467 2468 OVERFLOW_CHECK(array, data->raw_size, max_size); 2469 data->raw_data = (void *)array; 2470 array = (void *)array + data->raw_size; 2471 } 2472 2473 if (type & PERF_SAMPLE_BRANCH_STACK) { 2474 const u64 max_branch_nr = UINT64_MAX / 2475 sizeof(struct branch_entry); 2476 struct branch_entry *e; 2477 unsigned int i; 2478 2479 OVERFLOW_CHECK_u64(array); 2480 data->branch_stack = (struct branch_stack *)array++; 2481 2482 if (data->branch_stack->nr > max_branch_nr) 2483 return -EFAULT; 2484 2485 sz = data->branch_stack->nr * sizeof(struct branch_entry); 2486 if (evsel__has_branch_hw_idx(evsel)) { 2487 sz += sizeof(u64); 2488 e = &data->branch_stack->entries[0]; 2489 } else { 2490 data->no_hw_idx = true; 2491 /* 2492 * if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied, 2493 * only nr and entries[] will be output by kernel. 2494 */ 2495 e = (struct branch_entry *)&data->branch_stack->hw_idx; 2496 } 2497 2498 if (swapped) { 2499 /* 2500 * struct branch_flag does not have endian 2501 * specific bit field definition. And bswap 2502 * will not resolve the issue, since these 2503 * are bit fields. 2504 * 2505 * evsel__bitfield_swap_branch_flags() uses a 2506 * bitfield_swap macro to swap the bit position 2507 * based on the host endians. 2508 */ 2509 for (i = 0; i < data->branch_stack->nr; i++, e++) 2510 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value); 2511 } 2512 2513 OVERFLOW_CHECK(array, sz, max_size); 2514 array = (void *)array + sz; 2515 } 2516 2517 if (type & PERF_SAMPLE_REGS_USER) { 2518 OVERFLOW_CHECK_u64(array); 2519 data->user_regs.abi = *array; 2520 array++; 2521 2522 if (data->user_regs.abi) { 2523 u64 mask = evsel->core.attr.sample_regs_user; 2524 2525 sz = hweight64(mask) * sizeof(u64); 2526 OVERFLOW_CHECK(array, sz, max_size); 2527 data->user_regs.mask = mask; 2528 data->user_regs.regs = (u64 *)array; 2529 array = (void *)array + sz; 2530 } 2531 } 2532 2533 if (type & PERF_SAMPLE_STACK_USER) { 2534 OVERFLOW_CHECK_u64(array); 2535 sz = *array++; 2536 2537 data->user_stack.offset = ((char *)(array - 1) 2538 - (char *) event); 2539 2540 if (!sz) { 2541 data->user_stack.size = 0; 2542 } else { 2543 OVERFLOW_CHECK(array, sz, max_size); 2544 data->user_stack.data = (char *)array; 2545 array = (void *)array + sz; 2546 OVERFLOW_CHECK_u64(array); 2547 data->user_stack.size = *array++; 2548 if (WARN_ONCE(data->user_stack.size > sz, 2549 "user stack dump failure\n")) 2550 return -EFAULT; 2551 } 2552 } 2553 2554 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 2555 OVERFLOW_CHECK_u64(array); 2556 arch_perf_parse_sample_weight(data, array, type); 2557 array++; 2558 } 2559 2560 if (type & PERF_SAMPLE_DATA_SRC) { 2561 OVERFLOW_CHECK_u64(array); 2562 data->data_src = *array; 2563 array++; 2564 } 2565 2566 if (type & PERF_SAMPLE_TRANSACTION) { 2567 OVERFLOW_CHECK_u64(array); 2568 data->transaction = *array; 2569 array++; 2570 } 2571 2572 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; 2573 if (type & PERF_SAMPLE_REGS_INTR) { 2574 OVERFLOW_CHECK_u64(array); 2575 data->intr_regs.abi = *array; 2576 array++; 2577 2578 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 2579 u64 mask = evsel->core.attr.sample_regs_intr; 2580 2581 sz = hweight64(mask) * sizeof(u64); 2582 OVERFLOW_CHECK(array, sz, max_size); 2583 data->intr_regs.mask = mask; 2584 data->intr_regs.regs = (u64 *)array; 2585 array = (void *)array + sz; 2586 } 2587 } 2588 2589 data->phys_addr = 0; 2590 if (type & PERF_SAMPLE_PHYS_ADDR) { 2591 data->phys_addr = *array; 2592 array++; 2593 } 2594 2595 data->cgroup = 0; 2596 if (type & PERF_SAMPLE_CGROUP) { 2597 data->cgroup = *array; 2598 array++; 2599 } 2600 2601 data->data_page_size = 0; 2602 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 2603 data->data_page_size = *array; 2604 array++; 2605 } 2606 2607 data->code_page_size = 0; 2608 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 2609 data->code_page_size = *array; 2610 array++; 2611 } 2612 2613 if (type & PERF_SAMPLE_AUX) { 2614 OVERFLOW_CHECK_u64(array); 2615 sz = *array++; 2616 2617 OVERFLOW_CHECK(array, sz, max_size); 2618 /* Undo swap of data */ 2619 if (swapped) 2620 mem_bswap_64((char *)array, sz); 2621 data->aux_sample.size = sz; 2622 data->aux_sample.data = (char *)array; 2623 array = (void *)array + sz; 2624 } 2625 2626 return 0; 2627 } 2628 2629 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event, 2630 u64 *timestamp) 2631 { 2632 u64 type = evsel->core.attr.sample_type; 2633 const __u64 *array; 2634 2635 if (!(type & PERF_SAMPLE_TIME)) 2636 return -1; 2637 2638 if (event->header.type != PERF_RECORD_SAMPLE) { 2639 struct perf_sample data = { 2640 .time = -1ULL, 2641 }; 2642 2643 if (!evsel->core.attr.sample_id_all) 2644 return -1; 2645 if (perf_evsel__parse_id_sample(evsel, event, &data)) 2646 return -1; 2647 2648 *timestamp = data.time; 2649 return 0; 2650 } 2651 2652 array = event->sample.array; 2653 2654 if (perf_event__check_size(event, evsel->sample_size)) 2655 return -EFAULT; 2656 2657 if (type & PERF_SAMPLE_IDENTIFIER) 2658 array++; 2659 2660 if (type & PERF_SAMPLE_IP) 2661 array++; 2662 2663 if (type & PERF_SAMPLE_TID) 2664 array++; 2665 2666 if (type & PERF_SAMPLE_TIME) 2667 *timestamp = *array; 2668 2669 return 0; 2670 } 2671 2672 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name) 2673 { 2674 return tep_find_field(evsel->tp_format, name); 2675 } 2676 2677 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name) 2678 { 2679 struct tep_format_field *field = evsel__field(evsel, name); 2680 int offset; 2681 2682 if (!field) 2683 return NULL; 2684 2685 offset = field->offset; 2686 2687 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2688 offset = *(int *)(sample->raw_data + field->offset); 2689 offset &= 0xffff; 2690 } 2691 2692 return sample->raw_data + offset; 2693 } 2694 2695 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, 2696 bool needs_swap) 2697 { 2698 u64 value; 2699 void *ptr = sample->raw_data + field->offset; 2700 2701 switch (field->size) { 2702 case 1: 2703 return *(u8 *)ptr; 2704 case 2: 2705 value = *(u16 *)ptr; 2706 break; 2707 case 4: 2708 value = *(u32 *)ptr; 2709 break; 2710 case 8: 2711 memcpy(&value, ptr, sizeof(u64)); 2712 break; 2713 default: 2714 return 0; 2715 } 2716 2717 if (!needs_swap) 2718 return value; 2719 2720 switch (field->size) { 2721 case 2: 2722 return bswap_16(value); 2723 case 4: 2724 return bswap_32(value); 2725 case 8: 2726 return bswap_64(value); 2727 default: 2728 return 0; 2729 } 2730 2731 return 0; 2732 } 2733 2734 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name) 2735 { 2736 struct tep_format_field *field = evsel__field(evsel, name); 2737 2738 if (!field) 2739 return 0; 2740 2741 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; 2742 } 2743 2744 bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) 2745 { 2746 int paranoid; 2747 2748 if ((err == ENOENT || err == ENXIO || err == ENODEV) && 2749 evsel->core.attr.type == PERF_TYPE_HARDWARE && 2750 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { 2751 /* 2752 * If it's cycles then fall back to hrtimer based 2753 * cpu-clock-tick sw counter, which is always available even if 2754 * no PMU support. 2755 * 2756 * PPC returns ENXIO until 2.6.37 (behavior changed with commit 2757 * b0a873e). 2758 */ 2759 scnprintf(msg, msgsize, "%s", 2760 "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); 2761 2762 evsel->core.attr.type = PERF_TYPE_SOFTWARE; 2763 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; 2764 2765 zfree(&evsel->name); 2766 return true; 2767 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && 2768 (paranoid = perf_event_paranoid()) > 1) { 2769 const char *name = evsel__name(evsel); 2770 char *new_name; 2771 const char *sep = ":"; 2772 2773 /* If event has exclude user then don't exclude kernel. */ 2774 if (evsel->core.attr.exclude_user) 2775 return false; 2776 2777 /* Is there already the separator in the name. */ 2778 if (strchr(name, '/') || 2779 (strchr(name, ':') && !evsel->is_libpfm_event)) 2780 sep = ""; 2781 2782 if (asprintf(&new_name, "%s%su", name, sep) < 0) 2783 return false; 2784 2785 if (evsel->name) 2786 free(evsel->name); 2787 evsel->name = new_name; 2788 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying " 2789 "to fall back to excluding kernel and hypervisor " 2790 " samples", paranoid); 2791 evsel->core.attr.exclude_kernel = 1; 2792 evsel->core.attr.exclude_hv = 1; 2793 2794 return true; 2795 } 2796 2797 return false; 2798 } 2799 2800 static bool find_process(const char *name) 2801 { 2802 size_t len = strlen(name); 2803 DIR *dir; 2804 struct dirent *d; 2805 int ret = -1; 2806 2807 dir = opendir(procfs__mountpoint()); 2808 if (!dir) 2809 return false; 2810 2811 /* Walk through the directory. */ 2812 while (ret && (d = readdir(dir)) != NULL) { 2813 char path[PATH_MAX]; 2814 char *data; 2815 size_t size; 2816 2817 if ((d->d_type != DT_DIR) || 2818 !strcmp(".", d->d_name) || 2819 !strcmp("..", d->d_name)) 2820 continue; 2821 2822 scnprintf(path, sizeof(path), "%s/%s/comm", 2823 procfs__mountpoint(), d->d_name); 2824 2825 if (filename__read_str(path, &data, &size)) 2826 continue; 2827 2828 ret = strncmp(name, data, len); 2829 free(data); 2830 } 2831 2832 closedir(dir); 2833 return ret ? false : true; 2834 } 2835 2836 int evsel__open_strerror(struct evsel *evsel, struct target *target, 2837 int err, char *msg, size_t size) 2838 { 2839 char sbuf[STRERR_BUFSIZE]; 2840 int printed = 0, enforced = 0; 2841 2842 switch (err) { 2843 case EPERM: 2844 case EACCES: 2845 printed += scnprintf(msg + printed, size - printed, 2846 "Access to performance monitoring and observability operations is limited.\n"); 2847 2848 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) { 2849 if (enforced) { 2850 printed += scnprintf(msg + printed, size - printed, 2851 "Enforced MAC policy settings (SELinux) can limit access to performance\n" 2852 "monitoring and observability operations. Inspect system audit records for\n" 2853 "more perf_event access control information and adjusting the policy.\n"); 2854 } 2855 } 2856 2857 if (err == EPERM) 2858 printed += scnprintf(msg, size, 2859 "No permission to enable %s event.\n\n", evsel__name(evsel)); 2860 2861 return scnprintf(msg + printed, size - printed, 2862 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n" 2863 "access to performance monitoring and observability operations for processes\n" 2864 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n" 2865 "More information can be found at 'Perf events and tool security' document:\n" 2866 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n" 2867 "perf_event_paranoid setting is %d:\n" 2868 " -1: Allow use of (almost) all events by all users\n" 2869 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n" 2870 ">= 0: Disallow raw and ftrace function tracepoint access\n" 2871 ">= 1: Disallow CPU event access\n" 2872 ">= 2: Disallow kernel profiling\n" 2873 "To make the adjusted perf_event_paranoid setting permanent preserve it\n" 2874 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)", 2875 perf_event_paranoid()); 2876 case ENOENT: 2877 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel)); 2878 case EMFILE: 2879 return scnprintf(msg, size, "%s", 2880 "Too many events are opened.\n" 2881 "Probably the maximum number of open file descriptors has been reached.\n" 2882 "Hint: Try again after reducing the number of events.\n" 2883 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 2884 case ENOMEM: 2885 if (evsel__has_callchain(evsel) && 2886 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0) 2887 return scnprintf(msg, size, 2888 "Not enough memory to setup event with callchain.\n" 2889 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" 2890 "Hint: Current value: %d", sysctl__max_stack()); 2891 break; 2892 case ENODEV: 2893 if (target->cpu_list) 2894 return scnprintf(msg, size, "%s", 2895 "No such device - did you specify an out-of-range profile CPU?"); 2896 break; 2897 case EOPNOTSUPP: 2898 if (evsel->core.attr.aux_output) 2899 return scnprintf(msg, size, 2900 "%s: PMU Hardware doesn't support 'aux_output' feature", 2901 evsel__name(evsel)); 2902 if (evsel->core.attr.sample_period != 0) 2903 return scnprintf(msg, size, 2904 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'", 2905 evsel__name(evsel)); 2906 if (evsel->core.attr.precise_ip) 2907 return scnprintf(msg, size, "%s", 2908 "\'precise\' request may not be supported. Try removing 'p' modifier."); 2909 #if defined(__i386__) || defined(__x86_64__) 2910 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) 2911 return scnprintf(msg, size, "%s", 2912 "No hardware sampling interrupt available.\n"); 2913 #endif 2914 break; 2915 case EBUSY: 2916 if (find_process("oprofiled")) 2917 return scnprintf(msg, size, 2918 "The PMU counters are busy/taken by another profiler.\n" 2919 "We found oprofile daemon running, please stop it and try again."); 2920 break; 2921 case EINVAL: 2922 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size) 2923 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel."); 2924 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size) 2925 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel."); 2926 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) 2927 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel."); 2928 if (perf_missing_features.clockid) 2929 return scnprintf(msg, size, "clockid feature not supported."); 2930 if (perf_missing_features.clockid_wrong) 2931 return scnprintf(msg, size, "wrong clockid (%d).", clockid); 2932 if (perf_missing_features.aux_output) 2933 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel."); 2934 if (!target__has_cpu(target)) 2935 return scnprintf(msg, size, 2936 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.", 2937 evsel__name(evsel)); 2938 break; 2939 case ENODATA: 2940 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. " 2941 "Please add an auxiliary event in front of the load latency event."); 2942 default: 2943 break; 2944 } 2945 2946 return scnprintf(msg, size, 2947 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 2948 "/bin/dmesg | grep -i perf may provide additional information.\n", 2949 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel)); 2950 } 2951 2952 struct perf_env *evsel__env(struct evsel *evsel) 2953 { 2954 if (evsel && evsel->evlist) 2955 return evsel->evlist->env; 2956 return &perf_env; 2957 } 2958 2959 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) 2960 { 2961 int cpu, thread; 2962 2963 for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) { 2964 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); 2965 thread++) { 2966 int fd = FD(evsel, cpu, thread); 2967 2968 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, 2969 cpu, thread, fd) < 0) 2970 return -1; 2971 } 2972 } 2973 2974 return 0; 2975 } 2976 2977 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist) 2978 { 2979 struct perf_cpu_map *cpus = evsel->core.cpus; 2980 struct perf_thread_map *threads = evsel->core.threads; 2981 2982 if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr)) 2983 return -ENOMEM; 2984 2985 return store_evsel_ids(evsel, evlist); 2986 } 2987 2988 void evsel__zero_per_pkg(struct evsel *evsel) 2989 { 2990 struct hashmap_entry *cur; 2991 size_t bkt; 2992 2993 if (evsel->per_pkg_mask) { 2994 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) 2995 free((char *)cur->key); 2996 2997 hashmap__clear(evsel->per_pkg_mask); 2998 } 2999 } 3000 3001 bool evsel__is_hybrid(struct evsel *evsel) 3002 { 3003 return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name); 3004 } 3005 3006 struct evsel *evsel__leader(struct evsel *evsel) 3007 { 3008 return container_of(evsel->core.leader, struct evsel, core); 3009 } 3010 3011 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) 3012 { 3013 return evsel->core.leader == &leader->core; 3014 } 3015 3016 bool evsel__is_leader(struct evsel *evsel) 3017 { 3018 return evsel__has_leader(evsel, evsel); 3019 } 3020 3021 void evsel__set_leader(struct evsel *evsel, struct evsel *leader) 3022 { 3023 evsel->core.leader = &leader->core; 3024 } 3025 3026 int evsel__source_count(const struct evsel *evsel) 3027 { 3028 struct evsel *pos; 3029 int count = 0; 3030 3031 evlist__for_each_entry(evsel->evlist, pos) { 3032 if (pos->metric_leader == evsel) 3033 count++; 3034 } 3035 return count; 3036 } 3037