1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <byteswap.h> 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <linux/bitops.h> 13 #include <api/fs/fs.h> 14 #include <api/fs/tracing_path.h> 15 #include <linux/hw_breakpoint.h> 16 #include <linux/perf_event.h> 17 #include <linux/compiler.h> 18 #include <linux/err.h> 19 #include <linux/zalloc.h> 20 #include <sys/ioctl.h> 21 #include <sys/resource.h> 22 #include <sys/types.h> 23 #include <dirent.h> 24 #include <stdlib.h> 25 #include <perf/evsel.h> 26 #include "asm/bug.h" 27 #include "bpf_counter.h" 28 #include "callchain.h" 29 #include "cgroup.h" 30 #include "counts.h" 31 #include "event.h" 32 #include "evsel.h" 33 #include "util/env.h" 34 #include "util/evsel_config.h" 35 #include "util/evsel_fprintf.h" 36 #include "evlist.h" 37 #include <perf/cpumap.h> 38 #include "thread_map.h" 39 #include "target.h" 40 #include "perf_regs.h" 41 #include "record.h" 42 #include "debug.h" 43 #include "trace-event.h" 44 #include "stat.h" 45 #include "string2.h" 46 #include "memswap.h" 47 #include "util.h" 48 #include "util/hashmap.h" 49 #include "off_cpu.h" 50 #include "pmu.h" 51 #include "pmus.h" 52 #include "../perf-sys.h" 53 #include "util/parse-branch-options.h" 54 #include "util/bpf-filter.h" 55 #include <internal/xyarray.h> 56 #include <internal/lib.h> 57 #include <internal/threadmap.h> 58 59 #include <linux/ctype.h> 60 61 #ifdef HAVE_LIBTRACEEVENT 62 #include <traceevent/event-parse.h> 63 #endif 64 65 struct perf_missing_features perf_missing_features; 66 67 static clockid_t clockid; 68 69 static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = { 70 NULL, 71 "duration_time", 72 "user_time", 73 "system_time", 74 }; 75 76 const char *perf_tool_event__to_str(enum perf_tool_event ev) 77 { 78 if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX) 79 return perf_tool_event__tool_names[ev]; 80 81 return NULL; 82 } 83 84 enum perf_tool_event perf_tool_event__from_str(const char *str) 85 { 86 int i; 87 88 perf_tool_event__for_each_event(i) { 89 if (!strcmp(str, perf_tool_event__tool_names[i])) 90 return i; 91 } 92 return PERF_TOOL_NONE; 93 } 94 95 96 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused) 97 { 98 return 0; 99 } 100 101 void __weak test_attr__ready(void) { } 102 103 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused) 104 { 105 } 106 107 static struct { 108 size_t size; 109 int (*init)(struct evsel *evsel); 110 void (*fini)(struct evsel *evsel); 111 } perf_evsel__object = { 112 .size = sizeof(struct evsel), 113 .init = evsel__no_extra_init, 114 .fini = evsel__no_extra_fini, 115 }; 116 117 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel), 118 void (*fini)(struct evsel *evsel)) 119 { 120 121 if (object_size == 0) 122 goto set_methods; 123 124 if (perf_evsel__object.size > object_size) 125 return -EINVAL; 126 127 perf_evsel__object.size = object_size; 128 129 set_methods: 130 if (init != NULL) 131 perf_evsel__object.init = init; 132 133 if (fini != NULL) 134 perf_evsel__object.fini = fini; 135 136 return 0; 137 } 138 139 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 140 141 int __evsel__sample_size(u64 sample_type) 142 { 143 u64 mask = sample_type & PERF_SAMPLE_MASK; 144 int size = 0; 145 int i; 146 147 for (i = 0; i < 64; i++) { 148 if (mask & (1ULL << i)) 149 size++; 150 } 151 152 size *= sizeof(u64); 153 154 return size; 155 } 156 157 /** 158 * __perf_evsel__calc_id_pos - calculate id_pos. 159 * @sample_type: sample type 160 * 161 * This function returns the position of the event id (PERF_SAMPLE_ID or 162 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct 163 * perf_record_sample. 164 */ 165 static int __perf_evsel__calc_id_pos(u64 sample_type) 166 { 167 int idx = 0; 168 169 if (sample_type & PERF_SAMPLE_IDENTIFIER) 170 return 0; 171 172 if (!(sample_type & PERF_SAMPLE_ID)) 173 return -1; 174 175 if (sample_type & PERF_SAMPLE_IP) 176 idx += 1; 177 178 if (sample_type & PERF_SAMPLE_TID) 179 idx += 1; 180 181 if (sample_type & PERF_SAMPLE_TIME) 182 idx += 1; 183 184 if (sample_type & PERF_SAMPLE_ADDR) 185 idx += 1; 186 187 return idx; 188 } 189 190 /** 191 * __perf_evsel__calc_is_pos - calculate is_pos. 192 * @sample_type: sample type 193 * 194 * This function returns the position (counting backwards) of the event id 195 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if 196 * sample_id_all is used there is an id sample appended to non-sample events. 197 */ 198 static int __perf_evsel__calc_is_pos(u64 sample_type) 199 { 200 int idx = 1; 201 202 if (sample_type & PERF_SAMPLE_IDENTIFIER) 203 return 1; 204 205 if (!(sample_type & PERF_SAMPLE_ID)) 206 return -1; 207 208 if (sample_type & PERF_SAMPLE_CPU) 209 idx += 1; 210 211 if (sample_type & PERF_SAMPLE_STREAM_ID) 212 idx += 1; 213 214 return idx; 215 } 216 217 void evsel__calc_id_pos(struct evsel *evsel) 218 { 219 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); 220 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); 221 } 222 223 void __evsel__set_sample_bit(struct evsel *evsel, 224 enum perf_event_sample_format bit) 225 { 226 if (!(evsel->core.attr.sample_type & bit)) { 227 evsel->core.attr.sample_type |= bit; 228 evsel->sample_size += sizeof(u64); 229 evsel__calc_id_pos(evsel); 230 } 231 } 232 233 void __evsel__reset_sample_bit(struct evsel *evsel, 234 enum perf_event_sample_format bit) 235 { 236 if (evsel->core.attr.sample_type & bit) { 237 evsel->core.attr.sample_type &= ~bit; 238 evsel->sample_size -= sizeof(u64); 239 evsel__calc_id_pos(evsel); 240 } 241 } 242 243 void evsel__set_sample_id(struct evsel *evsel, 244 bool can_sample_identifier) 245 { 246 if (can_sample_identifier) { 247 evsel__reset_sample_bit(evsel, ID); 248 evsel__set_sample_bit(evsel, IDENTIFIER); 249 } else { 250 evsel__set_sample_bit(evsel, ID); 251 } 252 evsel->core.attr.read_format |= PERF_FORMAT_ID; 253 } 254 255 /** 256 * evsel__is_function_event - Return whether given evsel is a function 257 * trace event 258 * 259 * @evsel - evsel selector to be tested 260 * 261 * Return %true if event is function trace event 262 */ 263 bool evsel__is_function_event(struct evsel *evsel) 264 { 265 #define FUNCTION_EVENT "ftrace:function" 266 267 return evsel->name && 268 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); 269 270 #undef FUNCTION_EVENT 271 } 272 273 void evsel__init(struct evsel *evsel, 274 struct perf_event_attr *attr, int idx) 275 { 276 perf_evsel__init(&evsel->core, attr, idx); 277 evsel->tracking = !idx; 278 evsel->unit = strdup(""); 279 evsel->scale = 1.0; 280 evsel->max_events = ULONG_MAX; 281 evsel->evlist = NULL; 282 evsel->bpf_obj = NULL; 283 evsel->bpf_fd = -1; 284 INIT_LIST_HEAD(&evsel->config_terms); 285 INIT_LIST_HEAD(&evsel->bpf_counter_list); 286 perf_evsel__object.init(evsel); 287 evsel->sample_size = __evsel__sample_size(attr->sample_type); 288 evsel__calc_id_pos(evsel); 289 evsel->cmdline_group_boundary = false; 290 evsel->metric_events = NULL; 291 evsel->per_pkg_mask = NULL; 292 evsel->collect_stat = false; 293 evsel->pmu_name = NULL; 294 evsel->skippable = false; 295 } 296 297 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) 298 { 299 struct evsel *evsel = zalloc(perf_evsel__object.size); 300 301 if (!evsel) 302 return NULL; 303 evsel__init(evsel, attr, idx); 304 305 if (evsel__is_bpf_output(evsel) && !attr->sample_type) { 306 evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 307 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 308 evsel->core.attr.sample_period = 1; 309 } 310 311 if (evsel__is_clock(evsel)) { 312 free((char *)evsel->unit); 313 evsel->unit = strdup("msec"); 314 evsel->scale = 1e-6; 315 } 316 317 return evsel; 318 } 319 320 int copy_config_terms(struct list_head *dst, struct list_head *src) 321 { 322 struct evsel_config_term *pos, *tmp; 323 324 list_for_each_entry(pos, src, list) { 325 tmp = malloc(sizeof(*tmp)); 326 if (tmp == NULL) 327 return -ENOMEM; 328 329 *tmp = *pos; 330 if (tmp->free_str) { 331 tmp->val.str = strdup(pos->val.str); 332 if (tmp->val.str == NULL) { 333 free(tmp); 334 return -ENOMEM; 335 } 336 } 337 list_add_tail(&tmp->list, dst); 338 } 339 return 0; 340 } 341 342 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) 343 { 344 return copy_config_terms(&dst->config_terms, &src->config_terms); 345 } 346 347 /** 348 * evsel__clone - create a new evsel copied from @orig 349 * @orig: original evsel 350 * 351 * The assumption is that @orig is not configured nor opened yet. 352 * So we only care about the attributes that can be set while it's parsed. 353 */ 354 struct evsel *evsel__clone(struct evsel *orig) 355 { 356 struct evsel *evsel; 357 358 BUG_ON(orig->core.fd); 359 BUG_ON(orig->counts); 360 BUG_ON(orig->priv); 361 BUG_ON(orig->per_pkg_mask); 362 363 /* cannot handle BPF objects for now */ 364 if (orig->bpf_obj) 365 return NULL; 366 367 evsel = evsel__new(&orig->core.attr); 368 if (evsel == NULL) 369 return NULL; 370 371 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); 372 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); 373 evsel->core.threads = perf_thread_map__get(orig->core.threads); 374 evsel->core.nr_members = orig->core.nr_members; 375 evsel->core.system_wide = orig->core.system_wide; 376 evsel->core.requires_cpu = orig->core.requires_cpu; 377 evsel->core.is_pmu_core = orig->core.is_pmu_core; 378 379 if (orig->name) { 380 evsel->name = strdup(orig->name); 381 if (evsel->name == NULL) 382 goto out_err; 383 } 384 if (orig->group_name) { 385 evsel->group_name = strdup(orig->group_name); 386 if (evsel->group_name == NULL) 387 goto out_err; 388 } 389 if (orig->pmu_name) { 390 evsel->pmu_name = strdup(orig->pmu_name); 391 if (evsel->pmu_name == NULL) 392 goto out_err; 393 } 394 if (orig->filter) { 395 evsel->filter = strdup(orig->filter); 396 if (evsel->filter == NULL) 397 goto out_err; 398 } 399 if (orig->metric_id) { 400 evsel->metric_id = strdup(orig->metric_id); 401 if (evsel->metric_id == NULL) 402 goto out_err; 403 } 404 evsel->cgrp = cgroup__get(orig->cgrp); 405 #ifdef HAVE_LIBTRACEEVENT 406 evsel->tp_format = orig->tp_format; 407 #endif 408 evsel->handler = orig->handler; 409 evsel->core.leader = orig->core.leader; 410 411 evsel->max_events = orig->max_events; 412 evsel->tool_event = orig->tool_event; 413 free((char *)evsel->unit); 414 evsel->unit = strdup(orig->unit); 415 if (evsel->unit == NULL) 416 goto out_err; 417 418 evsel->scale = orig->scale; 419 evsel->snapshot = orig->snapshot; 420 evsel->per_pkg = orig->per_pkg; 421 evsel->percore = orig->percore; 422 evsel->precise_max = orig->precise_max; 423 evsel->is_libpfm_event = orig->is_libpfm_event; 424 425 evsel->exclude_GH = orig->exclude_GH; 426 evsel->sample_read = orig->sample_read; 427 evsel->auto_merge_stats = orig->auto_merge_stats; 428 evsel->collect_stat = orig->collect_stat; 429 evsel->weak_group = orig->weak_group; 430 evsel->use_config_name = orig->use_config_name; 431 evsel->pmu = orig->pmu; 432 433 if (evsel__copy_config_terms(evsel, orig) < 0) 434 goto out_err; 435 436 return evsel; 437 438 out_err: 439 evsel__delete(evsel); 440 return NULL; 441 } 442 443 /* 444 * Returns pointer with encoded error via <linux/err.h> interface. 445 */ 446 #ifdef HAVE_LIBTRACEEVENT 447 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) 448 { 449 struct evsel *evsel = zalloc(perf_evsel__object.size); 450 int err = -ENOMEM; 451 452 if (evsel == NULL) { 453 goto out_err; 454 } else { 455 struct perf_event_attr attr = { 456 .type = PERF_TYPE_TRACEPOINT, 457 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 458 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 459 }; 460 461 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) 462 goto out_free; 463 464 evsel->tp_format = trace_event__tp_format(sys, name); 465 if (IS_ERR(evsel->tp_format)) { 466 err = PTR_ERR(evsel->tp_format); 467 goto out_free; 468 } 469 470 event_attr_init(&attr); 471 attr.config = evsel->tp_format->id; 472 attr.sample_period = 1; 473 evsel__init(evsel, &attr, idx); 474 } 475 476 return evsel; 477 478 out_free: 479 zfree(&evsel->name); 480 free(evsel); 481 out_err: 482 return ERR_PTR(err); 483 } 484 #endif 485 486 const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = { 487 "cycles", 488 "instructions", 489 "cache-references", 490 "cache-misses", 491 "branches", 492 "branch-misses", 493 "bus-cycles", 494 "stalled-cycles-frontend", 495 "stalled-cycles-backend", 496 "ref-cycles", 497 }; 498 499 char *evsel__bpf_counter_events; 500 501 bool evsel__match_bpf_counter_events(const char *name) 502 { 503 int name_len; 504 bool match; 505 char *ptr; 506 507 if (!evsel__bpf_counter_events) 508 return false; 509 510 ptr = strstr(evsel__bpf_counter_events, name); 511 name_len = strlen(name); 512 513 /* check name matches a full token in evsel__bpf_counter_events */ 514 match = (ptr != NULL) && 515 ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) && 516 ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0')); 517 518 return match; 519 } 520 521 static const char *__evsel__hw_name(u64 config) 522 { 523 if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config]) 524 return evsel__hw_names[config]; 525 526 return "unknown-hardware"; 527 } 528 529 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size) 530 { 531 int colon = 0, r = 0; 532 struct perf_event_attr *attr = &evsel->core.attr; 533 bool exclude_guest_default = false; 534 535 #define MOD_PRINT(context, mod) do { \ 536 if (!attr->exclude_##context) { \ 537 if (!colon) colon = ++r; \ 538 r += scnprintf(bf + r, size - r, "%c", mod); \ 539 } } while(0) 540 541 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) { 542 MOD_PRINT(kernel, 'k'); 543 MOD_PRINT(user, 'u'); 544 MOD_PRINT(hv, 'h'); 545 exclude_guest_default = true; 546 } 547 548 if (attr->precise_ip) { 549 if (!colon) 550 colon = ++r; 551 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); 552 exclude_guest_default = true; 553 } 554 555 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) { 556 MOD_PRINT(host, 'H'); 557 MOD_PRINT(guest, 'G'); 558 } 559 #undef MOD_PRINT 560 if (colon) 561 bf[colon - 1] = ':'; 562 return r; 563 } 564 565 int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size) 566 { 567 return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); 568 } 569 570 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size) 571 { 572 int r = arch_evsel__hw_name(evsel, bf, size); 573 return r + evsel__add_modifiers(evsel, bf + r, size - r); 574 } 575 576 const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = { 577 "cpu-clock", 578 "task-clock", 579 "page-faults", 580 "context-switches", 581 "cpu-migrations", 582 "minor-faults", 583 "major-faults", 584 "alignment-faults", 585 "emulation-faults", 586 "dummy", 587 }; 588 589 static const char *__evsel__sw_name(u64 config) 590 { 591 if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config]) 592 return evsel__sw_names[config]; 593 return "unknown-software"; 594 } 595 596 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size) 597 { 598 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); 599 return r + evsel__add_modifiers(evsel, bf + r, size - r); 600 } 601 602 static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size) 603 { 604 return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev)); 605 } 606 607 static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) 608 { 609 int r; 610 611 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); 612 613 if (type & HW_BREAKPOINT_R) 614 r += scnprintf(bf + r, size - r, "r"); 615 616 if (type & HW_BREAKPOINT_W) 617 r += scnprintf(bf + r, size - r, "w"); 618 619 if (type & HW_BREAKPOINT_X) 620 r += scnprintf(bf + r, size - r, "x"); 621 622 return r; 623 } 624 625 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size) 626 { 627 struct perf_event_attr *attr = &evsel->core.attr; 628 int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); 629 return r + evsel__add_modifiers(evsel, bf + r, size - r); 630 } 631 632 const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = { 633 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 634 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 635 { "LLC", "L2", }, 636 { "dTLB", "d-tlb", "Data-TLB", }, 637 { "iTLB", "i-tlb", "Instruction-TLB", }, 638 { "branch", "branches", "bpu", "btb", "bpc", }, 639 { "node", }, 640 }; 641 642 const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = { 643 { "load", "loads", "read", }, 644 { "store", "stores", "write", }, 645 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 646 }; 647 648 const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = { 649 { "refs", "Reference", "ops", "access", }, 650 { "misses", "miss", }, 651 }; 652 653 #define C(x) PERF_COUNT_HW_CACHE_##x 654 #define CACHE_READ (1 << C(OP_READ)) 655 #define CACHE_WRITE (1 << C(OP_WRITE)) 656 #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) 657 #define COP(x) (1 << x) 658 659 /* 660 * cache operation stat 661 * L1I : Read and prefetch only 662 * ITLB and BPU : Read-only 663 */ 664 static const unsigned long evsel__hw_cache_stat[C(MAX)] = { 665 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 666 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), 667 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 668 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 669 [C(ITLB)] = (CACHE_READ), 670 [C(BPU)] = (CACHE_READ), 671 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 672 }; 673 674 bool evsel__is_cache_op_valid(u8 type, u8 op) 675 { 676 if (evsel__hw_cache_stat[type] & COP(op)) 677 return true; /* valid */ 678 else 679 return false; /* invalid */ 680 } 681 682 int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size) 683 { 684 if (result) { 685 return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0], 686 evsel__hw_cache_op[op][0], 687 evsel__hw_cache_result[result][0]); 688 } 689 690 return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0], 691 evsel__hw_cache_op[op][1]); 692 } 693 694 static int __evsel__hw_cache_name(u64 config, char *bf, size_t size) 695 { 696 u8 op, result, type = (config >> 0) & 0xff; 697 const char *err = "unknown-ext-hardware-cache-type"; 698 699 if (type >= PERF_COUNT_HW_CACHE_MAX) 700 goto out_err; 701 702 op = (config >> 8) & 0xff; 703 err = "unknown-ext-hardware-cache-op"; 704 if (op >= PERF_COUNT_HW_CACHE_OP_MAX) 705 goto out_err; 706 707 result = (config >> 16) & 0xff; 708 err = "unknown-ext-hardware-cache-result"; 709 if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 710 goto out_err; 711 712 err = "invalid-cache"; 713 if (!evsel__is_cache_op_valid(type, op)) 714 goto out_err; 715 716 return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size); 717 out_err: 718 return scnprintf(bf, size, "%s", err); 719 } 720 721 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size) 722 { 723 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); 724 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 725 } 726 727 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size) 728 { 729 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); 730 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); 731 } 732 733 const char *evsel__name(struct evsel *evsel) 734 { 735 char bf[128]; 736 737 if (!evsel) 738 goto out_unknown; 739 740 if (evsel->name) 741 return evsel->name; 742 743 switch (evsel->core.attr.type) { 744 case PERF_TYPE_RAW: 745 evsel__raw_name(evsel, bf, sizeof(bf)); 746 break; 747 748 case PERF_TYPE_HARDWARE: 749 evsel__hw_name(evsel, bf, sizeof(bf)); 750 break; 751 752 case PERF_TYPE_HW_CACHE: 753 evsel__hw_cache_name(evsel, bf, sizeof(bf)); 754 break; 755 756 case PERF_TYPE_SOFTWARE: 757 if (evsel__is_tool(evsel)) 758 evsel__tool_name(evsel->tool_event, bf, sizeof(bf)); 759 else 760 evsel__sw_name(evsel, bf, sizeof(bf)); 761 break; 762 763 case PERF_TYPE_TRACEPOINT: 764 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); 765 break; 766 767 case PERF_TYPE_BREAKPOINT: 768 evsel__bp_name(evsel, bf, sizeof(bf)); 769 break; 770 771 default: 772 scnprintf(bf, sizeof(bf), "unknown attr type: %d", 773 evsel->core.attr.type); 774 break; 775 } 776 777 evsel->name = strdup(bf); 778 779 if (evsel->name) 780 return evsel->name; 781 out_unknown: 782 return "unknown"; 783 } 784 785 bool evsel__name_is(struct evsel *evsel, const char *name) 786 { 787 return !strcmp(evsel__name(evsel), name); 788 } 789 790 const char *evsel__group_pmu_name(const struct evsel *evsel) 791 { 792 struct evsel *leader = evsel__leader(evsel); 793 struct evsel *pos; 794 795 /* 796 * Software events may be in a group with other uncore PMU events. Use 797 * the pmu_name of the first non-software event to avoid breaking the 798 * software event out of the group. 799 * 800 * Aux event leaders, like intel_pt, expect a group with events from 801 * other PMUs, so substitute the AUX event's PMU in this case. 802 */ 803 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE || evsel__is_aux_event(leader)) { 804 /* Starting with the leader, find the first event with a named PMU. */ 805 for_each_group_evsel(pos, leader) { 806 if (pos->pmu_name) 807 return pos->pmu_name; 808 } 809 } 810 811 return evsel->pmu_name ?: "cpu"; 812 } 813 814 const char *evsel__metric_id(const struct evsel *evsel) 815 { 816 if (evsel->metric_id) 817 return evsel->metric_id; 818 819 if (evsel__is_tool(evsel)) 820 return perf_tool_event__to_str(evsel->tool_event); 821 822 return "unknown"; 823 } 824 825 const char *evsel__group_name(struct evsel *evsel) 826 { 827 return evsel->group_name ?: "anon group"; 828 } 829 830 /* 831 * Returns the group details for the specified leader, 832 * with following rules. 833 * 834 * For record -e '{cycles,instructions}' 835 * 'anon group { cycles:u, instructions:u }' 836 * 837 * For record -e 'cycles,instructions' and report --group 838 * 'cycles:u, instructions:u' 839 */ 840 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size) 841 { 842 int ret = 0; 843 struct evsel *pos; 844 const char *group_name = evsel__group_name(evsel); 845 846 if (!evsel->forced_leader) 847 ret = scnprintf(buf, size, "%s { ", group_name); 848 849 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel)); 850 851 for_each_group_member(pos, evsel) 852 ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos)); 853 854 if (!evsel->forced_leader) 855 ret += scnprintf(buf + ret, size - ret, " }"); 856 857 return ret; 858 } 859 860 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 861 struct callchain_param *param) 862 { 863 bool function = evsel__is_function_event(evsel); 864 struct perf_event_attr *attr = &evsel->core.attr; 865 866 evsel__set_sample_bit(evsel, CALLCHAIN); 867 868 attr->sample_max_stack = param->max_stack; 869 870 if (opts->kernel_callchains) 871 attr->exclude_callchain_user = 1; 872 if (opts->user_callchains) 873 attr->exclude_callchain_kernel = 1; 874 if (param->record_mode == CALLCHAIN_LBR) { 875 if (!opts->branch_stack) { 876 if (attr->exclude_user) { 877 pr_warning("LBR callstack option is only available " 878 "to get user callchain information. " 879 "Falling back to framepointers.\n"); 880 } else { 881 evsel__set_sample_bit(evsel, BRANCH_STACK); 882 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER | 883 PERF_SAMPLE_BRANCH_CALL_STACK | 884 PERF_SAMPLE_BRANCH_NO_CYCLES | 885 PERF_SAMPLE_BRANCH_NO_FLAGS | 886 PERF_SAMPLE_BRANCH_HW_INDEX; 887 } 888 } else 889 pr_warning("Cannot use LBR callstack with branch stack. " 890 "Falling back to framepointers.\n"); 891 } 892 893 if (param->record_mode == CALLCHAIN_DWARF) { 894 if (!function) { 895 evsel__set_sample_bit(evsel, REGS_USER); 896 evsel__set_sample_bit(evsel, STACK_USER); 897 if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) { 898 attr->sample_regs_user |= DWARF_MINIMAL_REGS; 899 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, " 900 "specifying a subset with --user-regs may render DWARF unwinding unreliable, " 901 "so the minimal registers set (IP, SP) is explicitly forced.\n"); 902 } else { 903 attr->sample_regs_user |= arch__user_reg_mask(); 904 } 905 attr->sample_stack_user = param->dump_size; 906 attr->exclude_callchain_user = 1; 907 } else { 908 pr_info("Cannot use DWARF unwind for function trace event," 909 " falling back to framepointers.\n"); 910 } 911 } 912 913 if (function) { 914 pr_info("Disabling user space callchains for function trace event.\n"); 915 attr->exclude_callchain_user = 1; 916 } 917 } 918 919 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, 920 struct callchain_param *param) 921 { 922 if (param->enabled) 923 return __evsel__config_callchain(evsel, opts, param); 924 } 925 926 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param) 927 { 928 struct perf_event_attr *attr = &evsel->core.attr; 929 930 evsel__reset_sample_bit(evsel, CALLCHAIN); 931 if (param->record_mode == CALLCHAIN_LBR) { 932 evsel__reset_sample_bit(evsel, BRANCH_STACK); 933 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER | 934 PERF_SAMPLE_BRANCH_CALL_STACK | 935 PERF_SAMPLE_BRANCH_HW_INDEX); 936 } 937 if (param->record_mode == CALLCHAIN_DWARF) { 938 evsel__reset_sample_bit(evsel, REGS_USER); 939 evsel__reset_sample_bit(evsel, STACK_USER); 940 } 941 } 942 943 static void evsel__apply_config_terms(struct evsel *evsel, 944 struct record_opts *opts, bool track) 945 { 946 struct evsel_config_term *term; 947 struct list_head *config_terms = &evsel->config_terms; 948 struct perf_event_attr *attr = &evsel->core.attr; 949 /* callgraph default */ 950 struct callchain_param param = { 951 .record_mode = callchain_param.record_mode, 952 }; 953 u32 dump_size = 0; 954 int max_stack = 0; 955 const char *callgraph_buf = NULL; 956 957 list_for_each_entry(term, config_terms, list) { 958 switch (term->type) { 959 case EVSEL__CONFIG_TERM_PERIOD: 960 if (!(term->weak && opts->user_interval != ULLONG_MAX)) { 961 attr->sample_period = term->val.period; 962 attr->freq = 0; 963 evsel__reset_sample_bit(evsel, PERIOD); 964 } 965 break; 966 case EVSEL__CONFIG_TERM_FREQ: 967 if (!(term->weak && opts->user_freq != UINT_MAX)) { 968 attr->sample_freq = term->val.freq; 969 attr->freq = 1; 970 evsel__set_sample_bit(evsel, PERIOD); 971 } 972 break; 973 case EVSEL__CONFIG_TERM_TIME: 974 if (term->val.time) 975 evsel__set_sample_bit(evsel, TIME); 976 else 977 evsel__reset_sample_bit(evsel, TIME); 978 break; 979 case EVSEL__CONFIG_TERM_CALLGRAPH: 980 callgraph_buf = term->val.str; 981 break; 982 case EVSEL__CONFIG_TERM_BRANCH: 983 if (term->val.str && strcmp(term->val.str, "no")) { 984 evsel__set_sample_bit(evsel, BRANCH_STACK); 985 parse_branch_str(term->val.str, 986 &attr->branch_sample_type); 987 } else 988 evsel__reset_sample_bit(evsel, BRANCH_STACK); 989 break; 990 case EVSEL__CONFIG_TERM_STACK_USER: 991 dump_size = term->val.stack_user; 992 break; 993 case EVSEL__CONFIG_TERM_MAX_STACK: 994 max_stack = term->val.max_stack; 995 break; 996 case EVSEL__CONFIG_TERM_MAX_EVENTS: 997 evsel->max_events = term->val.max_events; 998 break; 999 case EVSEL__CONFIG_TERM_INHERIT: 1000 /* 1001 * attr->inherit should has already been set by 1002 * evsel__config. If user explicitly set 1003 * inherit using config terms, override global 1004 * opt->no_inherit setting. 1005 */ 1006 attr->inherit = term->val.inherit ? 1 : 0; 1007 break; 1008 case EVSEL__CONFIG_TERM_OVERWRITE: 1009 attr->write_backward = term->val.overwrite ? 1 : 0; 1010 break; 1011 case EVSEL__CONFIG_TERM_DRV_CFG: 1012 break; 1013 case EVSEL__CONFIG_TERM_PERCORE: 1014 break; 1015 case EVSEL__CONFIG_TERM_AUX_OUTPUT: 1016 attr->aux_output = term->val.aux_output ? 1 : 0; 1017 break; 1018 case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE: 1019 /* Already applied by auxtrace */ 1020 break; 1021 case EVSEL__CONFIG_TERM_CFG_CHG: 1022 break; 1023 default: 1024 break; 1025 } 1026 } 1027 1028 /* User explicitly set per-event callgraph, clear the old setting and reset. */ 1029 if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) { 1030 bool sample_address = false; 1031 1032 if (max_stack) { 1033 param.max_stack = max_stack; 1034 if (callgraph_buf == NULL) 1035 callgraph_buf = "fp"; 1036 } 1037 1038 /* parse callgraph parameters */ 1039 if (callgraph_buf != NULL) { 1040 if (!strcmp(callgraph_buf, "no")) { 1041 param.enabled = false; 1042 param.record_mode = CALLCHAIN_NONE; 1043 } else { 1044 param.enabled = true; 1045 if (parse_callchain_record(callgraph_buf, ¶m)) { 1046 pr_err("per-event callgraph setting for %s failed. " 1047 "Apply callgraph global setting for it\n", 1048 evsel->name); 1049 return; 1050 } 1051 if (param.record_mode == CALLCHAIN_DWARF) 1052 sample_address = true; 1053 } 1054 } 1055 if (dump_size > 0) { 1056 dump_size = round_up(dump_size, sizeof(u64)); 1057 param.dump_size = dump_size; 1058 } 1059 1060 /* If global callgraph set, clear it */ 1061 if (callchain_param.enabled) 1062 evsel__reset_callgraph(evsel, &callchain_param); 1063 1064 /* set perf-event callgraph */ 1065 if (param.enabled) { 1066 if (sample_address) { 1067 evsel__set_sample_bit(evsel, ADDR); 1068 evsel__set_sample_bit(evsel, DATA_SRC); 1069 evsel->core.attr.mmap_data = track; 1070 } 1071 evsel__config_callchain(evsel, opts, ¶m); 1072 } 1073 } 1074 } 1075 1076 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) 1077 { 1078 struct evsel_config_term *term, *found_term = NULL; 1079 1080 list_for_each_entry(term, &evsel->config_terms, list) { 1081 if (term->type == type) 1082 found_term = term; 1083 } 1084 1085 return found_term; 1086 } 1087 1088 void __weak arch_evsel__set_sample_weight(struct evsel *evsel) 1089 { 1090 evsel__set_sample_bit(evsel, WEIGHT); 1091 } 1092 1093 void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused, 1094 struct perf_event_attr *attr __maybe_unused) 1095 { 1096 } 1097 1098 static void evsel__set_default_freq_period(struct record_opts *opts, 1099 struct perf_event_attr *attr) 1100 { 1101 if (opts->freq) { 1102 attr->freq = 1; 1103 attr->sample_freq = opts->freq; 1104 } else { 1105 attr->sample_period = opts->default_interval; 1106 } 1107 } 1108 1109 static bool evsel__is_offcpu_event(struct evsel *evsel) 1110 { 1111 return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT); 1112 } 1113 1114 /* 1115 * The enable_on_exec/disabled value strategy: 1116 * 1117 * 1) For any type of traced program: 1118 * - all independent events and group leaders are disabled 1119 * - all group members are enabled 1120 * 1121 * Group members are ruled by group leaders. They need to 1122 * be enabled, because the group scheduling relies on that. 1123 * 1124 * 2) For traced programs executed by perf: 1125 * - all independent events and group leaders have 1126 * enable_on_exec set 1127 * - we don't specifically enable or disable any event during 1128 * the record command 1129 * 1130 * Independent events and group leaders are initially disabled 1131 * and get enabled by exec. Group members are ruled by group 1132 * leaders as stated in 1). 1133 * 1134 * 3) For traced programs attached by perf (pid/tid): 1135 * - we specifically enable or disable all events during 1136 * the record command 1137 * 1138 * When attaching events to already running traced we 1139 * enable/disable events specifically, as there's no 1140 * initial traced exec call. 1141 */ 1142 void evsel__config(struct evsel *evsel, struct record_opts *opts, 1143 struct callchain_param *callchain) 1144 { 1145 struct evsel *leader = evsel__leader(evsel); 1146 struct perf_event_attr *attr = &evsel->core.attr; 1147 int track = evsel->tracking; 1148 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; 1149 1150 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; 1151 attr->inherit = !opts->no_inherit; 1152 attr->write_backward = opts->overwrite ? 1 : 0; 1153 attr->read_format = PERF_FORMAT_LOST; 1154 1155 evsel__set_sample_bit(evsel, IP); 1156 evsel__set_sample_bit(evsel, TID); 1157 1158 if (evsel->sample_read) { 1159 evsel__set_sample_bit(evsel, READ); 1160 1161 /* 1162 * We need ID even in case of single event, because 1163 * PERF_SAMPLE_READ process ID specific data. 1164 */ 1165 evsel__set_sample_id(evsel, false); 1166 1167 /* 1168 * Apply group format only if we belong to group 1169 * with more than one members. 1170 */ 1171 if (leader->core.nr_members > 1) { 1172 attr->read_format |= PERF_FORMAT_GROUP; 1173 attr->inherit = 0; 1174 } 1175 } 1176 1177 /* 1178 * We default some events to have a default interval. But keep 1179 * it a weak assumption overridable by the user. 1180 */ 1181 if ((evsel->is_libpfm_event && !attr->sample_period) || 1182 (!evsel->is_libpfm_event && (!attr->sample_period || 1183 opts->user_freq != UINT_MAX || 1184 opts->user_interval != ULLONG_MAX))) 1185 evsel__set_default_freq_period(opts, attr); 1186 1187 /* 1188 * If attr->freq was set (here or earlier), ask for period 1189 * to be sampled. 1190 */ 1191 if (attr->freq) 1192 evsel__set_sample_bit(evsel, PERIOD); 1193 1194 if (opts->no_samples) 1195 attr->sample_freq = 0; 1196 1197 if (opts->inherit_stat) { 1198 evsel->core.attr.read_format |= 1199 PERF_FORMAT_TOTAL_TIME_ENABLED | 1200 PERF_FORMAT_TOTAL_TIME_RUNNING | 1201 PERF_FORMAT_ID; 1202 attr->inherit_stat = 1; 1203 } 1204 1205 if (opts->sample_address) { 1206 evsel__set_sample_bit(evsel, ADDR); 1207 attr->mmap_data = track; 1208 } 1209 1210 /* 1211 * We don't allow user space callchains for function trace 1212 * event, due to issues with page faults while tracing page 1213 * fault handler and its overall trickiness nature. 1214 */ 1215 if (evsel__is_function_event(evsel)) 1216 evsel->core.attr.exclude_callchain_user = 1; 1217 1218 if (callchain && callchain->enabled && !evsel->no_aux_samples) 1219 evsel__config_callchain(evsel, opts, callchain); 1220 1221 if (opts->sample_intr_regs && !evsel->no_aux_samples && 1222 !evsel__is_dummy_event(evsel)) { 1223 attr->sample_regs_intr = opts->sample_intr_regs; 1224 evsel__set_sample_bit(evsel, REGS_INTR); 1225 } 1226 1227 if (opts->sample_user_regs && !evsel->no_aux_samples && 1228 !evsel__is_dummy_event(evsel)) { 1229 attr->sample_regs_user |= opts->sample_user_regs; 1230 evsel__set_sample_bit(evsel, REGS_USER); 1231 } 1232 1233 if (target__has_cpu(&opts->target) || opts->sample_cpu) 1234 evsel__set_sample_bit(evsel, CPU); 1235 1236 /* 1237 * When the user explicitly disabled time don't force it here. 1238 */ 1239 if (opts->sample_time && 1240 (!perf_missing_features.sample_id_all && 1241 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu || 1242 opts->sample_time_set))) 1243 evsel__set_sample_bit(evsel, TIME); 1244 1245 if (opts->raw_samples && !evsel->no_aux_samples) { 1246 evsel__set_sample_bit(evsel, TIME); 1247 evsel__set_sample_bit(evsel, RAW); 1248 evsel__set_sample_bit(evsel, CPU); 1249 } 1250 1251 if (opts->sample_address) 1252 evsel__set_sample_bit(evsel, DATA_SRC); 1253 1254 if (opts->sample_phys_addr) 1255 evsel__set_sample_bit(evsel, PHYS_ADDR); 1256 1257 if (opts->no_buffering) { 1258 attr->watermark = 0; 1259 attr->wakeup_events = 1; 1260 } 1261 if (opts->branch_stack && !evsel->no_aux_samples) { 1262 evsel__set_sample_bit(evsel, BRANCH_STACK); 1263 attr->branch_sample_type = opts->branch_stack; 1264 } 1265 1266 if (opts->sample_weight) 1267 arch_evsel__set_sample_weight(evsel); 1268 1269 attr->task = track; 1270 attr->mmap = track; 1271 attr->mmap2 = track && !perf_missing_features.mmap2; 1272 attr->comm = track; 1273 attr->build_id = track && opts->build_id; 1274 1275 /* 1276 * ksymbol is tracked separately with text poke because it needs to be 1277 * system wide and enabled immediately. 1278 */ 1279 if (!opts->text_poke) 1280 attr->ksymbol = track && !perf_missing_features.ksymbol; 1281 attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf; 1282 1283 if (opts->record_namespaces) 1284 attr->namespaces = track; 1285 1286 if (opts->record_cgroup) { 1287 attr->cgroup = track && !perf_missing_features.cgroup; 1288 evsel__set_sample_bit(evsel, CGROUP); 1289 } 1290 1291 if (opts->sample_data_page_size) 1292 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE); 1293 1294 if (opts->sample_code_page_size) 1295 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE); 1296 1297 if (opts->record_switch_events) 1298 attr->context_switch = track; 1299 1300 if (opts->sample_transaction) 1301 evsel__set_sample_bit(evsel, TRANSACTION); 1302 1303 if (opts->running_time) { 1304 evsel->core.attr.read_format |= 1305 PERF_FORMAT_TOTAL_TIME_ENABLED | 1306 PERF_FORMAT_TOTAL_TIME_RUNNING; 1307 } 1308 1309 /* 1310 * XXX see the function comment above 1311 * 1312 * Disabling only independent events or group leaders, 1313 * keeping group members enabled. 1314 */ 1315 if (evsel__is_group_leader(evsel)) 1316 attr->disabled = 1; 1317 1318 /* 1319 * Setting enable_on_exec for independent events and 1320 * group leaders for traced executed by perf. 1321 */ 1322 if (target__none(&opts->target) && evsel__is_group_leader(evsel) && 1323 !opts->target.initial_delay) 1324 attr->enable_on_exec = 1; 1325 1326 if (evsel->immediate) { 1327 attr->disabled = 0; 1328 attr->enable_on_exec = 0; 1329 } 1330 1331 clockid = opts->clockid; 1332 if (opts->use_clockid) { 1333 attr->use_clockid = 1; 1334 attr->clockid = opts->clockid; 1335 } 1336 1337 if (evsel->precise_max) 1338 attr->precise_ip = 3; 1339 1340 if (opts->all_user) { 1341 attr->exclude_kernel = 1; 1342 attr->exclude_user = 0; 1343 } 1344 1345 if (opts->all_kernel) { 1346 attr->exclude_kernel = 0; 1347 attr->exclude_user = 1; 1348 } 1349 1350 if (evsel->core.own_cpus || evsel->unit) 1351 evsel->core.attr.read_format |= PERF_FORMAT_ID; 1352 1353 /* 1354 * Apply event specific term settings, 1355 * it overloads any global configuration. 1356 */ 1357 evsel__apply_config_terms(evsel, opts, track); 1358 1359 evsel->ignore_missing_thread = opts->ignore_missing_thread; 1360 1361 /* The --period option takes the precedence. */ 1362 if (opts->period_set) { 1363 if (opts->period) 1364 evsel__set_sample_bit(evsel, PERIOD); 1365 else 1366 evsel__reset_sample_bit(evsel, PERIOD); 1367 } 1368 1369 /* 1370 * A dummy event never triggers any actual counter and therefore 1371 * cannot be used with branch_stack. 1372 * 1373 * For initial_delay, a dummy event is added implicitly. 1374 * The software event will trigger -EOPNOTSUPP error out, 1375 * if BRANCH_STACK bit is set. 1376 */ 1377 if (evsel__is_dummy_event(evsel)) 1378 evsel__reset_sample_bit(evsel, BRANCH_STACK); 1379 1380 if (evsel__is_offcpu_event(evsel)) 1381 evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES; 1382 1383 arch__post_evsel_config(evsel, attr); 1384 } 1385 1386 int evsel__set_filter(struct evsel *evsel, const char *filter) 1387 { 1388 char *new_filter = strdup(filter); 1389 1390 if (new_filter != NULL) { 1391 free(evsel->filter); 1392 evsel->filter = new_filter; 1393 return 0; 1394 } 1395 1396 return -1; 1397 } 1398 1399 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter) 1400 { 1401 char *new_filter; 1402 1403 if (evsel->filter == NULL) 1404 return evsel__set_filter(evsel, filter); 1405 1406 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) { 1407 free(evsel->filter); 1408 evsel->filter = new_filter; 1409 return 0; 1410 } 1411 1412 return -1; 1413 } 1414 1415 int evsel__append_tp_filter(struct evsel *evsel, const char *filter) 1416 { 1417 return evsel__append_filter(evsel, "(%s) && (%s)", filter); 1418 } 1419 1420 int evsel__append_addr_filter(struct evsel *evsel, const char *filter) 1421 { 1422 return evsel__append_filter(evsel, "%s,%s", filter); 1423 } 1424 1425 /* Caller has to clear disabled after going through all CPUs. */ 1426 int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx) 1427 { 1428 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx); 1429 } 1430 1431 int evsel__enable(struct evsel *evsel) 1432 { 1433 int err = perf_evsel__enable(&evsel->core); 1434 1435 if (!err) 1436 evsel->disabled = false; 1437 return err; 1438 } 1439 1440 /* Caller has to set disabled after going through all CPUs. */ 1441 int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx) 1442 { 1443 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx); 1444 } 1445 1446 int evsel__disable(struct evsel *evsel) 1447 { 1448 int err = perf_evsel__disable(&evsel->core); 1449 /* 1450 * We mark it disabled here so that tools that disable a event can 1451 * ignore events after they disable it. I.e. the ring buffer may have 1452 * already a few more events queued up before the kernel got the stop 1453 * request. 1454 */ 1455 if (!err) 1456 evsel->disabled = true; 1457 1458 return err; 1459 } 1460 1461 void free_config_terms(struct list_head *config_terms) 1462 { 1463 struct evsel_config_term *term, *h; 1464 1465 list_for_each_entry_safe(term, h, config_terms, list) { 1466 list_del_init(&term->list); 1467 if (term->free_str) 1468 zfree(&term->val.str); 1469 free(term); 1470 } 1471 } 1472 1473 static void evsel__free_config_terms(struct evsel *evsel) 1474 { 1475 free_config_terms(&evsel->config_terms); 1476 } 1477 1478 void evsel__exit(struct evsel *evsel) 1479 { 1480 assert(list_empty(&evsel->core.node)); 1481 assert(evsel->evlist == NULL); 1482 bpf_counter__destroy(evsel); 1483 perf_bpf_filter__destroy(evsel); 1484 evsel__free_counts(evsel); 1485 perf_evsel__free_fd(&evsel->core); 1486 perf_evsel__free_id(&evsel->core); 1487 evsel__free_config_terms(evsel); 1488 cgroup__put(evsel->cgrp); 1489 perf_cpu_map__put(evsel->core.cpus); 1490 perf_cpu_map__put(evsel->core.own_cpus); 1491 perf_thread_map__put(evsel->core.threads); 1492 zfree(&evsel->group_name); 1493 zfree(&evsel->name); 1494 zfree(&evsel->pmu_name); 1495 zfree(&evsel->unit); 1496 zfree(&evsel->metric_id); 1497 evsel__zero_per_pkg(evsel); 1498 hashmap__free(evsel->per_pkg_mask); 1499 evsel->per_pkg_mask = NULL; 1500 zfree(&evsel->metric_events); 1501 perf_evsel__object.fini(evsel); 1502 } 1503 1504 void evsel__delete(struct evsel *evsel) 1505 { 1506 if (!evsel) 1507 return; 1508 1509 evsel__exit(evsel); 1510 free(evsel); 1511 } 1512 1513 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread, 1514 struct perf_counts_values *count) 1515 { 1516 struct perf_counts_values tmp; 1517 1518 if (!evsel->prev_raw_counts) 1519 return; 1520 1521 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread); 1522 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count; 1523 1524 count->val = count->val - tmp.val; 1525 count->ena = count->ena - tmp.ena; 1526 count->run = count->run - tmp.run; 1527 } 1528 1529 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread) 1530 { 1531 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread); 1532 1533 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count); 1534 } 1535 1536 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread, 1537 u64 val, u64 ena, u64 run, u64 lost) 1538 { 1539 struct perf_counts_values *count; 1540 1541 count = perf_counts(counter->counts, cpu_map_idx, thread); 1542 1543 count->val = val; 1544 count->ena = ena; 1545 count->run = run; 1546 count->lost = lost; 1547 1548 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true); 1549 } 1550 1551 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data) 1552 { 1553 u64 read_format = leader->core.attr.read_format; 1554 struct sample_read_value *v; 1555 u64 nr, ena = 0, run = 0, lost = 0; 1556 1557 nr = *data++; 1558 1559 if (nr != (u64) leader->core.nr_members) 1560 return -EINVAL; 1561 1562 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1563 ena = *data++; 1564 1565 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1566 run = *data++; 1567 1568 v = (void *)data; 1569 sample_read_group__for_each(v, nr, read_format) { 1570 struct evsel *counter; 1571 1572 counter = evlist__id2evsel(leader->evlist, v->id); 1573 if (!counter) 1574 return -EINVAL; 1575 1576 if (read_format & PERF_FORMAT_LOST) 1577 lost = v->lost; 1578 1579 evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost); 1580 } 1581 1582 return 0; 1583 } 1584 1585 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread) 1586 { 1587 struct perf_stat_evsel *ps = leader->stats; 1588 u64 read_format = leader->core.attr.read_format; 1589 int size = perf_evsel__read_size(&leader->core); 1590 u64 *data = ps->group_data; 1591 1592 if (!(read_format & PERF_FORMAT_ID)) 1593 return -EINVAL; 1594 1595 if (!evsel__is_group_leader(leader)) 1596 return -EINVAL; 1597 1598 if (!data) { 1599 data = zalloc(size); 1600 if (!data) 1601 return -ENOMEM; 1602 1603 ps->group_data = data; 1604 } 1605 1606 if (FD(leader, cpu_map_idx, thread) < 0) 1607 return -EINVAL; 1608 1609 if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0) 1610 return -errno; 1611 1612 return evsel__process_group_data(leader, cpu_map_idx, thread, data); 1613 } 1614 1615 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread) 1616 { 1617 u64 read_format = evsel->core.attr.read_format; 1618 1619 if (read_format & PERF_FORMAT_GROUP) 1620 return evsel__read_group(evsel, cpu_map_idx, thread); 1621 1622 return evsel__read_one(evsel, cpu_map_idx, thread); 1623 } 1624 1625 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale) 1626 { 1627 struct perf_counts_values count; 1628 size_t nv = scale ? 3 : 1; 1629 1630 if (FD(evsel, cpu_map_idx, thread) < 0) 1631 return -EINVAL; 1632 1633 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0) 1634 return -ENOMEM; 1635 1636 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0) 1637 return -errno; 1638 1639 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count); 1640 perf_counts_values__scale(&count, scale, NULL); 1641 *perf_counts(evsel->counts, cpu_map_idx, thread) = count; 1642 return 0; 1643 } 1644 1645 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, 1646 int cpu_map_idx) 1647 { 1648 struct perf_cpu cpu; 1649 1650 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); 1651 return perf_cpu_map__idx(other->core.cpus, cpu); 1652 } 1653 1654 static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx) 1655 { 1656 struct evsel *leader = evsel__leader(evsel); 1657 1658 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || 1659 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { 1660 return evsel__match_other_cpu(evsel, leader, cpu_map_idx); 1661 } 1662 1663 return cpu_map_idx; 1664 } 1665 1666 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread) 1667 { 1668 struct evsel *leader = evsel__leader(evsel); 1669 int fd; 1670 1671 if (evsel__is_group_leader(evsel)) 1672 return -1; 1673 1674 /* 1675 * Leader must be already processed/open, 1676 * if not it's a bug. 1677 */ 1678 BUG_ON(!leader->core.fd); 1679 1680 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx); 1681 if (cpu_map_idx == -1) 1682 return -1; 1683 1684 fd = FD(leader, cpu_map_idx, thread); 1685 BUG_ON(fd == -1 && !leader->skippable); 1686 1687 /* 1688 * When the leader has been skipped, return -2 to distinguish from no 1689 * group leader case. 1690 */ 1691 return fd == -1 ? -2 : fd; 1692 } 1693 1694 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx) 1695 { 1696 for (int cpu = 0; cpu < nr_cpus; cpu++) 1697 for (int thread = thread_idx; thread < nr_threads - 1; thread++) 1698 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1); 1699 } 1700 1701 static int update_fds(struct evsel *evsel, 1702 int nr_cpus, int cpu_map_idx, 1703 int nr_threads, int thread_idx) 1704 { 1705 struct evsel *pos; 1706 1707 if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads) 1708 return -EINVAL; 1709 1710 evlist__for_each_entry(evsel->evlist, pos) { 1711 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx; 1712 1713 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx); 1714 1715 /* 1716 * Since fds for next evsel has not been created, 1717 * there is no need to iterate whole event list. 1718 */ 1719 if (pos == evsel) 1720 break; 1721 } 1722 return 0; 1723 } 1724 1725 static bool evsel__ignore_missing_thread(struct evsel *evsel, 1726 int nr_cpus, int cpu_map_idx, 1727 struct perf_thread_map *threads, 1728 int thread, int err) 1729 { 1730 pid_t ignore_pid = perf_thread_map__pid(threads, thread); 1731 1732 if (!evsel->ignore_missing_thread) 1733 return false; 1734 1735 /* The system wide setup does not work with threads. */ 1736 if (evsel->core.system_wide) 1737 return false; 1738 1739 /* The -ESRCH is perf event syscall errno for pid's not found. */ 1740 if (err != -ESRCH) 1741 return false; 1742 1743 /* If there's only one thread, let it fail. */ 1744 if (threads->nr == 1) 1745 return false; 1746 1747 /* 1748 * We should remove fd for missing_thread first 1749 * because thread_map__remove() will decrease threads->nr. 1750 */ 1751 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread)) 1752 return false; 1753 1754 if (thread_map__remove(threads, thread)) 1755 return false; 1756 1757 pr_warning("WARNING: Ignored open failure for pid %d\n", 1758 ignore_pid); 1759 return true; 1760 } 1761 1762 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1763 void *priv __maybe_unused) 1764 { 1765 return fprintf(fp, " %-32s %s\n", name, val); 1766 } 1767 1768 static void display_attr(struct perf_event_attr *attr) 1769 { 1770 if (verbose >= 2 || debug_peo_args) { 1771 fprintf(stderr, "%.60s\n", graph_dotted_line); 1772 fprintf(stderr, "perf_event_attr:\n"); 1773 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL); 1774 fprintf(stderr, "%.60s\n", graph_dotted_line); 1775 } 1776 } 1777 1778 bool evsel__precise_ip_fallback(struct evsel *evsel) 1779 { 1780 /* Do not try less precise if not requested. */ 1781 if (!evsel->precise_max) 1782 return false; 1783 1784 /* 1785 * We tried all the precise_ip values, and it's 1786 * still failing, so leave it to standard fallback. 1787 */ 1788 if (!evsel->core.attr.precise_ip) { 1789 evsel->core.attr.precise_ip = evsel->precise_ip_original; 1790 return false; 1791 } 1792 1793 if (!evsel->precise_ip_original) 1794 evsel->precise_ip_original = evsel->core.attr.precise_ip; 1795 1796 evsel->core.attr.precise_ip--; 1797 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); 1798 display_attr(&evsel->core.attr); 1799 return true; 1800 } 1801 1802 static struct perf_cpu_map *empty_cpu_map; 1803 static struct perf_thread_map *empty_thread_map; 1804 1805 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1806 struct perf_thread_map *threads) 1807 { 1808 int nthreads = perf_thread_map__nr(threads); 1809 1810 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || 1811 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) 1812 return -EINVAL; 1813 1814 if (cpus == NULL) { 1815 if (empty_cpu_map == NULL) { 1816 empty_cpu_map = perf_cpu_map__dummy_new(); 1817 if (empty_cpu_map == NULL) 1818 return -ENOMEM; 1819 } 1820 1821 cpus = empty_cpu_map; 1822 } 1823 1824 if (threads == NULL) { 1825 if (empty_thread_map == NULL) { 1826 empty_thread_map = thread_map__new_by_tid(-1); 1827 if (empty_thread_map == NULL) 1828 return -ENOMEM; 1829 } 1830 1831 threads = empty_thread_map; 1832 } 1833 1834 if (evsel->core.fd == NULL && 1835 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0) 1836 return -ENOMEM; 1837 1838 evsel->open_flags = PERF_FLAG_FD_CLOEXEC; 1839 if (evsel->cgrp) 1840 evsel->open_flags |= PERF_FLAG_PID_CGROUP; 1841 1842 return 0; 1843 } 1844 1845 static void evsel__disable_missing_features(struct evsel *evsel) 1846 { 1847 if (perf_missing_features.read_lost) 1848 evsel->core.attr.read_format &= ~PERF_FORMAT_LOST; 1849 if (perf_missing_features.weight_struct) { 1850 evsel__set_sample_bit(evsel, WEIGHT); 1851 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); 1852 } 1853 if (perf_missing_features.clockid_wrong) 1854 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ 1855 if (perf_missing_features.clockid) { 1856 evsel->core.attr.use_clockid = 0; 1857 evsel->core.attr.clockid = 0; 1858 } 1859 if (perf_missing_features.cloexec) 1860 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; 1861 if (perf_missing_features.mmap2) 1862 evsel->core.attr.mmap2 = 0; 1863 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest) 1864 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; 1865 if (perf_missing_features.lbr_flags) 1866 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1867 PERF_SAMPLE_BRANCH_NO_CYCLES); 1868 if (perf_missing_features.group_read && evsel->core.attr.inherit) 1869 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); 1870 if (perf_missing_features.ksymbol) 1871 evsel->core.attr.ksymbol = 0; 1872 if (perf_missing_features.bpf) 1873 evsel->core.attr.bpf_event = 0; 1874 if (perf_missing_features.branch_hw_idx) 1875 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; 1876 if (perf_missing_features.sample_id_all) 1877 evsel->core.attr.sample_id_all = 0; 1878 } 1879 1880 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, 1881 struct perf_thread_map *threads) 1882 { 1883 int err; 1884 1885 err = __evsel__prepare_open(evsel, cpus, threads); 1886 if (err) 1887 return err; 1888 1889 evsel__disable_missing_features(evsel); 1890 1891 return err; 1892 } 1893 1894 bool evsel__detect_missing_features(struct evsel *evsel) 1895 { 1896 /* 1897 * Must probe features in the order they were added to the 1898 * perf_event_attr interface. 1899 */ 1900 if (!perf_missing_features.read_lost && 1901 (evsel->core.attr.read_format & PERF_FORMAT_LOST)) { 1902 perf_missing_features.read_lost = true; 1903 pr_debug2("switching off PERF_FORMAT_LOST support\n"); 1904 return true; 1905 } else if (!perf_missing_features.weight_struct && 1906 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { 1907 perf_missing_features.weight_struct = true; 1908 pr_debug2("switching off weight struct support\n"); 1909 return true; 1910 } else if (!perf_missing_features.code_page_size && 1911 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { 1912 perf_missing_features.code_page_size = true; 1913 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n"); 1914 return false; 1915 } else if (!perf_missing_features.data_page_size && 1916 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { 1917 perf_missing_features.data_page_size = true; 1918 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n"); 1919 return false; 1920 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { 1921 perf_missing_features.cgroup = true; 1922 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n"); 1923 return false; 1924 } else if (!perf_missing_features.branch_hw_idx && 1925 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { 1926 perf_missing_features.branch_hw_idx = true; 1927 pr_debug2("switching off branch HW index support\n"); 1928 return true; 1929 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { 1930 perf_missing_features.aux_output = true; 1931 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n"); 1932 return false; 1933 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { 1934 perf_missing_features.bpf = true; 1935 pr_debug2_peo("switching off bpf_event\n"); 1936 return true; 1937 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { 1938 perf_missing_features.ksymbol = true; 1939 pr_debug2_peo("switching off ksymbol\n"); 1940 return true; 1941 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { 1942 perf_missing_features.write_backward = true; 1943 pr_debug2_peo("switching off write_backward\n"); 1944 return false; 1945 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { 1946 perf_missing_features.clockid_wrong = true; 1947 pr_debug2_peo("switching off clockid\n"); 1948 return true; 1949 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { 1950 perf_missing_features.clockid = true; 1951 pr_debug2_peo("switching off use_clockid\n"); 1952 return true; 1953 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { 1954 perf_missing_features.cloexec = true; 1955 pr_debug2_peo("switching off cloexec flag\n"); 1956 return true; 1957 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { 1958 perf_missing_features.mmap2 = true; 1959 pr_debug2_peo("switching off mmap2\n"); 1960 return true; 1961 } else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) { 1962 if (evsel->pmu == NULL) 1963 evsel->pmu = evsel__find_pmu(evsel); 1964 1965 if (evsel->pmu) 1966 evsel->pmu->missing_features.exclude_guest = true; 1967 else { 1968 /* we cannot find PMU, disable attrs now */ 1969 evsel->core.attr.exclude_host = false; 1970 evsel->core.attr.exclude_guest = false; 1971 } 1972 1973 if (evsel->exclude_GH) { 1974 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n"); 1975 return false; 1976 } 1977 if (!perf_missing_features.exclude_guest) { 1978 perf_missing_features.exclude_guest = true; 1979 pr_debug2_peo("switching off exclude_guest, exclude_host\n"); 1980 } 1981 return true; 1982 } else if (!perf_missing_features.sample_id_all) { 1983 perf_missing_features.sample_id_all = true; 1984 pr_debug2_peo("switching off sample_id_all\n"); 1985 return true; 1986 } else if (!perf_missing_features.lbr_flags && 1987 (evsel->core.attr.branch_sample_type & 1988 (PERF_SAMPLE_BRANCH_NO_CYCLES | 1989 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1990 perf_missing_features.lbr_flags = true; 1991 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n"); 1992 return true; 1993 } else if (!perf_missing_features.group_read && 1994 evsel->core.attr.inherit && 1995 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && 1996 evsel__is_group_leader(evsel)) { 1997 perf_missing_features.group_read = true; 1998 pr_debug2_peo("switching off group read\n"); 1999 return true; 2000 } else { 2001 return false; 2002 } 2003 } 2004 2005 bool evsel__increase_rlimit(enum rlimit_action *set_rlimit) 2006 { 2007 int old_errno; 2008 struct rlimit l; 2009 2010 if (*set_rlimit < INCREASED_MAX) { 2011 old_errno = errno; 2012 2013 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 2014 if (*set_rlimit == NO_CHANGE) { 2015 l.rlim_cur = l.rlim_max; 2016 } else { 2017 l.rlim_cur = l.rlim_max + 1000; 2018 l.rlim_max = l.rlim_cur; 2019 } 2020 if (setrlimit(RLIMIT_NOFILE, &l) == 0) { 2021 (*set_rlimit) += 1; 2022 errno = old_errno; 2023 return true; 2024 } 2025 } 2026 errno = old_errno; 2027 } 2028 2029 return false; 2030 } 2031 2032 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, 2033 struct perf_thread_map *threads, 2034 int start_cpu_map_idx, int end_cpu_map_idx) 2035 { 2036 int idx, thread, nthreads; 2037 int pid = -1, err, old_errno; 2038 enum rlimit_action set_rlimit = NO_CHANGE; 2039 2040 err = __evsel__prepare_open(evsel, cpus, threads); 2041 if (err) 2042 return err; 2043 2044 if (cpus == NULL) 2045 cpus = empty_cpu_map; 2046 2047 if (threads == NULL) 2048 threads = empty_thread_map; 2049 2050 nthreads = perf_thread_map__nr(threads); 2051 2052 if (evsel->cgrp) 2053 pid = evsel->cgrp->fd; 2054 2055 fallback_missing_features: 2056 evsel__disable_missing_features(evsel); 2057 2058 display_attr(&evsel->core.attr); 2059 2060 for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) { 2061 2062 for (thread = 0; thread < nthreads; thread++) { 2063 int fd, group_fd; 2064 retry_open: 2065 if (thread >= nthreads) 2066 break; 2067 2068 if (!evsel->cgrp && !evsel->core.system_wide) 2069 pid = perf_thread_map__pid(threads, thread); 2070 2071 group_fd = get_group_fd(evsel, idx, thread); 2072 2073 if (group_fd == -2) { 2074 pr_debug("broken group leader for %s\n", evsel->name); 2075 err = -EINVAL; 2076 goto out_close; 2077 } 2078 2079 test_attr__ready(); 2080 2081 /* Debug message used by test scripts */ 2082 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", 2083 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags); 2084 2085 fd = sys_perf_event_open(&evsel->core.attr, pid, 2086 perf_cpu_map__cpu(cpus, idx).cpu, 2087 group_fd, evsel->open_flags); 2088 2089 FD(evsel, idx, thread) = fd; 2090 2091 if (fd < 0) { 2092 err = -errno; 2093 2094 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", 2095 err); 2096 goto try_fallback; 2097 } 2098 2099 bpf_counter__install_pe(evsel, idx, fd); 2100 2101 if (unlikely(test_attr__enabled)) { 2102 test_attr__open(&evsel->core.attr, pid, 2103 perf_cpu_map__cpu(cpus, idx), 2104 fd, group_fd, evsel->open_flags); 2105 } 2106 2107 /* Debug message used by test scripts */ 2108 pr_debug2_peo(" = %d\n", fd); 2109 2110 if (evsel->bpf_fd >= 0) { 2111 int evt_fd = fd; 2112 int bpf_fd = evsel->bpf_fd; 2113 2114 err = ioctl(evt_fd, 2115 PERF_EVENT_IOC_SET_BPF, 2116 bpf_fd); 2117 if (err && errno != EEXIST) { 2118 pr_err("failed to attach bpf fd %d: %s\n", 2119 bpf_fd, strerror(errno)); 2120 err = -EINVAL; 2121 goto out_close; 2122 } 2123 } 2124 2125 set_rlimit = NO_CHANGE; 2126 2127 /* 2128 * If we succeeded but had to kill clockid, fail and 2129 * have evsel__open_strerror() print us a nice error. 2130 */ 2131 if (perf_missing_features.clockid || 2132 perf_missing_features.clockid_wrong) { 2133 err = -EINVAL; 2134 goto out_close; 2135 } 2136 } 2137 } 2138 2139 return 0; 2140 2141 try_fallback: 2142 if (evsel__precise_ip_fallback(evsel)) 2143 goto retry_open; 2144 2145 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus), 2146 idx, threads, thread, err)) { 2147 /* We just removed 1 thread, so lower the upper nthreads limit. */ 2148 nthreads--; 2149 2150 /* ... and pretend like nothing have happened. */ 2151 err = 0; 2152 goto retry_open; 2153 } 2154 /* 2155 * perf stat needs between 5 and 22 fds per CPU. When we run out 2156 * of them try to increase the limits. 2157 */ 2158 if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit)) 2159 goto retry_open; 2160 2161 if (err != -EINVAL || idx > 0 || thread > 0) 2162 goto out_close; 2163 2164 if (evsel__detect_missing_features(evsel)) 2165 goto fallback_missing_features; 2166 out_close: 2167 if (err) 2168 threads->err_thread = thread; 2169 2170 old_errno = errno; 2171 do { 2172 while (--thread >= 0) { 2173 if (FD(evsel, idx, thread) >= 0) 2174 close(FD(evsel, idx, thread)); 2175 FD(evsel, idx, thread) = -1; 2176 } 2177 thread = nthreads; 2178 } while (--idx >= 0); 2179 errno = old_errno; 2180 return err; 2181 } 2182 2183 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, 2184 struct perf_thread_map *threads) 2185 { 2186 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus)); 2187 } 2188 2189 void evsel__close(struct evsel *evsel) 2190 { 2191 perf_evsel__close(&evsel->core); 2192 perf_evsel__free_id(&evsel->core); 2193 } 2194 2195 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx) 2196 { 2197 if (cpu_map_idx == -1) 2198 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus)); 2199 2200 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1); 2201 } 2202 2203 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) 2204 { 2205 return evsel__open(evsel, NULL, threads); 2206 } 2207 2208 static int perf_evsel__parse_id_sample(const struct evsel *evsel, 2209 const union perf_event *event, 2210 struct perf_sample *sample) 2211 { 2212 u64 type = evsel->core.attr.sample_type; 2213 const __u64 *array = event->sample.array; 2214 bool swapped = evsel->needs_swap; 2215 union u64_swap u; 2216 2217 array += ((event->header.size - 2218 sizeof(event->header)) / sizeof(u64)) - 1; 2219 2220 if (type & PERF_SAMPLE_IDENTIFIER) { 2221 sample->id = *array; 2222 array--; 2223 } 2224 2225 if (type & PERF_SAMPLE_CPU) { 2226 u.val64 = *array; 2227 if (swapped) { 2228 /* undo swap of u64, then swap on individual u32s */ 2229 u.val64 = bswap_64(u.val64); 2230 u.val32[0] = bswap_32(u.val32[0]); 2231 } 2232 2233 sample->cpu = u.val32[0]; 2234 array--; 2235 } 2236 2237 if (type & PERF_SAMPLE_STREAM_ID) { 2238 sample->stream_id = *array; 2239 array--; 2240 } 2241 2242 if (type & PERF_SAMPLE_ID) { 2243 sample->id = *array; 2244 array--; 2245 } 2246 2247 if (type & PERF_SAMPLE_TIME) { 2248 sample->time = *array; 2249 array--; 2250 } 2251 2252 if (type & PERF_SAMPLE_TID) { 2253 u.val64 = *array; 2254 if (swapped) { 2255 /* undo swap of u64, then swap on individual u32s */ 2256 u.val64 = bswap_64(u.val64); 2257 u.val32[0] = bswap_32(u.val32[0]); 2258 u.val32[1] = bswap_32(u.val32[1]); 2259 } 2260 2261 sample->pid = u.val32[0]; 2262 sample->tid = u.val32[1]; 2263 array--; 2264 } 2265 2266 return 0; 2267 } 2268 2269 static inline bool overflow(const void *endp, u16 max_size, const void *offset, 2270 u64 size) 2271 { 2272 return size > max_size || offset + size > endp; 2273 } 2274 2275 #define OVERFLOW_CHECK(offset, size, max_size) \ 2276 do { \ 2277 if (overflow(endp, (max_size), (offset), (size))) \ 2278 return -EFAULT; \ 2279 } while (0) 2280 2281 #define OVERFLOW_CHECK_u64(offset) \ 2282 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) 2283 2284 static int 2285 perf_event__check_size(union perf_event *event, unsigned int sample_size) 2286 { 2287 /* 2288 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes 2289 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to 2290 * check the format does not go past the end of the event. 2291 */ 2292 if (sample_size + sizeof(event->header) > event->header.size) 2293 return -EFAULT; 2294 2295 return 0; 2296 } 2297 2298 void __weak arch_perf_parse_sample_weight(struct perf_sample *data, 2299 const __u64 *array, 2300 u64 type __maybe_unused) 2301 { 2302 data->weight = *array; 2303 } 2304 2305 u64 evsel__bitfield_swap_branch_flags(u64 value) 2306 { 2307 u64 new_val = 0; 2308 2309 /* 2310 * branch_flags 2311 * union { 2312 * u64 values; 2313 * struct { 2314 * mispred:1 //target mispredicted 2315 * predicted:1 //target predicted 2316 * in_tx:1 //in transaction 2317 * abort:1 //transaction abort 2318 * cycles:16 //cycle count to last branch 2319 * type:4 //branch type 2320 * spec:2 //branch speculation info 2321 * new_type:4 //additional branch type 2322 * priv:3 //privilege level 2323 * reserved:31 2324 * } 2325 * } 2326 * 2327 * Avoid bswap64() the entire branch_flag.value, 2328 * as it has variable bit-field sizes. Instead the 2329 * macro takes the bit-field position/size, 2330 * swaps it based on the host endianness. 2331 */ 2332 if (host_is_bigendian()) { 2333 new_val = bitfield_swap(value, 0, 1); 2334 new_val |= bitfield_swap(value, 1, 1); 2335 new_val |= bitfield_swap(value, 2, 1); 2336 new_val |= bitfield_swap(value, 3, 1); 2337 new_val |= bitfield_swap(value, 4, 16); 2338 new_val |= bitfield_swap(value, 20, 4); 2339 new_val |= bitfield_swap(value, 24, 2); 2340 new_val |= bitfield_swap(value, 26, 4); 2341 new_val |= bitfield_swap(value, 30, 3); 2342 new_val |= bitfield_swap(value, 33, 31); 2343 } else { 2344 new_val = bitfield_swap(value, 63, 1); 2345 new_val |= bitfield_swap(value, 62, 1); 2346 new_val |= bitfield_swap(value, 61, 1); 2347 new_val |= bitfield_swap(value, 60, 1); 2348 new_val |= bitfield_swap(value, 44, 16); 2349 new_val |= bitfield_swap(value, 40, 4); 2350 new_val |= bitfield_swap(value, 38, 2); 2351 new_val |= bitfield_swap(value, 34, 4); 2352 new_val |= bitfield_swap(value, 31, 3); 2353 new_val |= bitfield_swap(value, 0, 31); 2354 } 2355 2356 return new_val; 2357 } 2358 2359 int evsel__parse_sample(struct evsel *evsel, union perf_event *event, 2360 struct perf_sample *data) 2361 { 2362 u64 type = evsel->core.attr.sample_type; 2363 bool swapped = evsel->needs_swap; 2364 const __u64 *array; 2365 u16 max_size = event->header.size; 2366 const void *endp = (void *)event + max_size; 2367 u64 sz; 2368 2369 /* 2370 * used for cross-endian analysis. See git commit 65014ab3 2371 * for why this goofiness is needed. 2372 */ 2373 union u64_swap u; 2374 2375 memset(data, 0, sizeof(*data)); 2376 data->cpu = data->pid = data->tid = -1; 2377 data->stream_id = data->id = data->time = -1ULL; 2378 data->period = evsel->core.attr.sample_period; 2379 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 2380 data->misc = event->header.misc; 2381 data->id = -1ULL; 2382 data->data_src = PERF_MEM_DATA_SRC_NONE; 2383 data->vcpu = -1; 2384 2385 if (event->header.type != PERF_RECORD_SAMPLE) { 2386 if (!evsel->core.attr.sample_id_all) 2387 return 0; 2388 return perf_evsel__parse_id_sample(evsel, event, data); 2389 } 2390 2391 array = event->sample.array; 2392 2393 if (perf_event__check_size(event, evsel->sample_size)) 2394 return -EFAULT; 2395 2396 if (type & PERF_SAMPLE_IDENTIFIER) { 2397 data->id = *array; 2398 array++; 2399 } 2400 2401 if (type & PERF_SAMPLE_IP) { 2402 data->ip = *array; 2403 array++; 2404 } 2405 2406 if (type & PERF_SAMPLE_TID) { 2407 u.val64 = *array; 2408 if (swapped) { 2409 /* undo swap of u64, then swap on individual u32s */ 2410 u.val64 = bswap_64(u.val64); 2411 u.val32[0] = bswap_32(u.val32[0]); 2412 u.val32[1] = bswap_32(u.val32[1]); 2413 } 2414 2415 data->pid = u.val32[0]; 2416 data->tid = u.val32[1]; 2417 array++; 2418 } 2419 2420 if (type & PERF_SAMPLE_TIME) { 2421 data->time = *array; 2422 array++; 2423 } 2424 2425 if (type & PERF_SAMPLE_ADDR) { 2426 data->addr = *array; 2427 array++; 2428 } 2429 2430 if (type & PERF_SAMPLE_ID) { 2431 data->id = *array; 2432 array++; 2433 } 2434 2435 if (type & PERF_SAMPLE_STREAM_ID) { 2436 data->stream_id = *array; 2437 array++; 2438 } 2439 2440 if (type & PERF_SAMPLE_CPU) { 2441 2442 u.val64 = *array; 2443 if (swapped) { 2444 /* undo swap of u64, then swap on individual u32s */ 2445 u.val64 = bswap_64(u.val64); 2446 u.val32[0] = bswap_32(u.val32[0]); 2447 } 2448 2449 data->cpu = u.val32[0]; 2450 array++; 2451 } 2452 2453 if (type & PERF_SAMPLE_PERIOD) { 2454 data->period = *array; 2455 array++; 2456 } 2457 2458 if (type & PERF_SAMPLE_READ) { 2459 u64 read_format = evsel->core.attr.read_format; 2460 2461 OVERFLOW_CHECK_u64(array); 2462 if (read_format & PERF_FORMAT_GROUP) 2463 data->read.group.nr = *array; 2464 else 2465 data->read.one.value = *array; 2466 2467 array++; 2468 2469 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2470 OVERFLOW_CHECK_u64(array); 2471 data->read.time_enabled = *array; 2472 array++; 2473 } 2474 2475 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2476 OVERFLOW_CHECK_u64(array); 2477 data->read.time_running = *array; 2478 array++; 2479 } 2480 2481 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2482 if (read_format & PERF_FORMAT_GROUP) { 2483 const u64 max_group_nr = UINT64_MAX / 2484 sizeof(struct sample_read_value); 2485 2486 if (data->read.group.nr > max_group_nr) 2487 return -EFAULT; 2488 2489 sz = data->read.group.nr * sample_read_value_size(read_format); 2490 OVERFLOW_CHECK(array, sz, max_size); 2491 data->read.group.values = 2492 (struct sample_read_value *)array; 2493 array = (void *)array + sz; 2494 } else { 2495 OVERFLOW_CHECK_u64(array); 2496 data->read.one.id = *array; 2497 array++; 2498 2499 if (read_format & PERF_FORMAT_LOST) { 2500 OVERFLOW_CHECK_u64(array); 2501 data->read.one.lost = *array; 2502 array++; 2503 } 2504 } 2505 } 2506 2507 if (type & PERF_SAMPLE_CALLCHAIN) { 2508 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); 2509 2510 OVERFLOW_CHECK_u64(array); 2511 data->callchain = (struct ip_callchain *)array++; 2512 if (data->callchain->nr > max_callchain_nr) 2513 return -EFAULT; 2514 sz = data->callchain->nr * sizeof(u64); 2515 OVERFLOW_CHECK(array, sz, max_size); 2516 array = (void *)array + sz; 2517 } 2518 2519 if (type & PERF_SAMPLE_RAW) { 2520 OVERFLOW_CHECK_u64(array); 2521 u.val64 = *array; 2522 2523 /* 2524 * Undo swap of u64, then swap on individual u32s, 2525 * get the size of the raw area and undo all of the 2526 * swap. The pevent interface handles endianness by 2527 * itself. 2528 */ 2529 if (swapped) { 2530 u.val64 = bswap_64(u.val64); 2531 u.val32[0] = bswap_32(u.val32[0]); 2532 u.val32[1] = bswap_32(u.val32[1]); 2533 } 2534 data->raw_size = u.val32[0]; 2535 2536 /* 2537 * The raw data is aligned on 64bits including the 2538 * u32 size, so it's safe to use mem_bswap_64. 2539 */ 2540 if (swapped) 2541 mem_bswap_64((void *) array, data->raw_size); 2542 2543 array = (void *)array + sizeof(u32); 2544 2545 OVERFLOW_CHECK(array, data->raw_size, max_size); 2546 data->raw_data = (void *)array; 2547 array = (void *)array + data->raw_size; 2548 } 2549 2550 if (type & PERF_SAMPLE_BRANCH_STACK) { 2551 const u64 max_branch_nr = UINT64_MAX / 2552 sizeof(struct branch_entry); 2553 struct branch_entry *e; 2554 unsigned int i; 2555 2556 OVERFLOW_CHECK_u64(array); 2557 data->branch_stack = (struct branch_stack *)array++; 2558 2559 if (data->branch_stack->nr > max_branch_nr) 2560 return -EFAULT; 2561 2562 sz = data->branch_stack->nr * sizeof(struct branch_entry); 2563 if (evsel__has_branch_hw_idx(evsel)) { 2564 sz += sizeof(u64); 2565 e = &data->branch_stack->entries[0]; 2566 } else { 2567 data->no_hw_idx = true; 2568 /* 2569 * if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied, 2570 * only nr and entries[] will be output by kernel. 2571 */ 2572 e = (struct branch_entry *)&data->branch_stack->hw_idx; 2573 } 2574 2575 if (swapped) { 2576 /* 2577 * struct branch_flag does not have endian 2578 * specific bit field definition. And bswap 2579 * will not resolve the issue, since these 2580 * are bit fields. 2581 * 2582 * evsel__bitfield_swap_branch_flags() uses a 2583 * bitfield_swap macro to swap the bit position 2584 * based on the host endians. 2585 */ 2586 for (i = 0; i < data->branch_stack->nr; i++, e++) 2587 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value); 2588 } 2589 2590 OVERFLOW_CHECK(array, sz, max_size); 2591 array = (void *)array + sz; 2592 } 2593 2594 if (type & PERF_SAMPLE_REGS_USER) { 2595 OVERFLOW_CHECK_u64(array); 2596 data->user_regs.abi = *array; 2597 array++; 2598 2599 if (data->user_regs.abi) { 2600 u64 mask = evsel->core.attr.sample_regs_user; 2601 2602 sz = hweight64(mask) * sizeof(u64); 2603 OVERFLOW_CHECK(array, sz, max_size); 2604 data->user_regs.mask = mask; 2605 data->user_regs.regs = (u64 *)array; 2606 array = (void *)array + sz; 2607 } 2608 } 2609 2610 if (type & PERF_SAMPLE_STACK_USER) { 2611 OVERFLOW_CHECK_u64(array); 2612 sz = *array++; 2613 2614 data->user_stack.offset = ((char *)(array - 1) 2615 - (char *) event); 2616 2617 if (!sz) { 2618 data->user_stack.size = 0; 2619 } else { 2620 OVERFLOW_CHECK(array, sz, max_size); 2621 data->user_stack.data = (char *)array; 2622 array = (void *)array + sz; 2623 OVERFLOW_CHECK_u64(array); 2624 data->user_stack.size = *array++; 2625 if (WARN_ONCE(data->user_stack.size > sz, 2626 "user stack dump failure\n")) 2627 return -EFAULT; 2628 } 2629 } 2630 2631 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 2632 OVERFLOW_CHECK_u64(array); 2633 arch_perf_parse_sample_weight(data, array, type); 2634 array++; 2635 } 2636 2637 if (type & PERF_SAMPLE_DATA_SRC) { 2638 OVERFLOW_CHECK_u64(array); 2639 data->data_src = *array; 2640 array++; 2641 } 2642 2643 if (type & PERF_SAMPLE_TRANSACTION) { 2644 OVERFLOW_CHECK_u64(array); 2645 data->transaction = *array; 2646 array++; 2647 } 2648 2649 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; 2650 if (type & PERF_SAMPLE_REGS_INTR) { 2651 OVERFLOW_CHECK_u64(array); 2652 data->intr_regs.abi = *array; 2653 array++; 2654 2655 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 2656 u64 mask = evsel->core.attr.sample_regs_intr; 2657 2658 sz = hweight64(mask) * sizeof(u64); 2659 OVERFLOW_CHECK(array, sz, max_size); 2660 data->intr_regs.mask = mask; 2661 data->intr_regs.regs = (u64 *)array; 2662 array = (void *)array + sz; 2663 } 2664 } 2665 2666 data->phys_addr = 0; 2667 if (type & PERF_SAMPLE_PHYS_ADDR) { 2668 data->phys_addr = *array; 2669 array++; 2670 } 2671 2672 data->cgroup = 0; 2673 if (type & PERF_SAMPLE_CGROUP) { 2674 data->cgroup = *array; 2675 array++; 2676 } 2677 2678 data->data_page_size = 0; 2679 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 2680 data->data_page_size = *array; 2681 array++; 2682 } 2683 2684 data->code_page_size = 0; 2685 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 2686 data->code_page_size = *array; 2687 array++; 2688 } 2689 2690 if (type & PERF_SAMPLE_AUX) { 2691 OVERFLOW_CHECK_u64(array); 2692 sz = *array++; 2693 2694 OVERFLOW_CHECK(array, sz, max_size); 2695 /* Undo swap of data */ 2696 if (swapped) 2697 mem_bswap_64((char *)array, sz); 2698 data->aux_sample.size = sz; 2699 data->aux_sample.data = (char *)array; 2700 array = (void *)array + sz; 2701 } 2702 2703 return 0; 2704 } 2705 2706 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event, 2707 u64 *timestamp) 2708 { 2709 u64 type = evsel->core.attr.sample_type; 2710 const __u64 *array; 2711 2712 if (!(type & PERF_SAMPLE_TIME)) 2713 return -1; 2714 2715 if (event->header.type != PERF_RECORD_SAMPLE) { 2716 struct perf_sample data = { 2717 .time = -1ULL, 2718 }; 2719 2720 if (!evsel->core.attr.sample_id_all) 2721 return -1; 2722 if (perf_evsel__parse_id_sample(evsel, event, &data)) 2723 return -1; 2724 2725 *timestamp = data.time; 2726 return 0; 2727 } 2728 2729 array = event->sample.array; 2730 2731 if (perf_event__check_size(event, evsel->sample_size)) 2732 return -EFAULT; 2733 2734 if (type & PERF_SAMPLE_IDENTIFIER) 2735 array++; 2736 2737 if (type & PERF_SAMPLE_IP) 2738 array++; 2739 2740 if (type & PERF_SAMPLE_TID) 2741 array++; 2742 2743 if (type & PERF_SAMPLE_TIME) 2744 *timestamp = *array; 2745 2746 return 0; 2747 } 2748 2749 u16 evsel__id_hdr_size(struct evsel *evsel) 2750 { 2751 u64 sample_type = evsel->core.attr.sample_type; 2752 u16 size = 0; 2753 2754 if (sample_type & PERF_SAMPLE_TID) 2755 size += sizeof(u64); 2756 2757 if (sample_type & PERF_SAMPLE_TIME) 2758 size += sizeof(u64); 2759 2760 if (sample_type & PERF_SAMPLE_ID) 2761 size += sizeof(u64); 2762 2763 if (sample_type & PERF_SAMPLE_STREAM_ID) 2764 size += sizeof(u64); 2765 2766 if (sample_type & PERF_SAMPLE_CPU) 2767 size += sizeof(u64); 2768 2769 if (sample_type & PERF_SAMPLE_IDENTIFIER) 2770 size += sizeof(u64); 2771 2772 return size; 2773 } 2774 2775 #ifdef HAVE_LIBTRACEEVENT 2776 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name) 2777 { 2778 return tep_find_field(evsel->tp_format, name); 2779 } 2780 2781 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name) 2782 { 2783 struct tep_format_field *field = evsel__field(evsel, name); 2784 int offset; 2785 2786 if (!field) 2787 return NULL; 2788 2789 offset = field->offset; 2790 2791 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2792 offset = *(int *)(sample->raw_data + field->offset); 2793 offset &= 0xffff; 2794 if (tep_field_is_relative(field->flags)) 2795 offset += field->offset + field->size; 2796 } 2797 2798 return sample->raw_data + offset; 2799 } 2800 2801 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, 2802 bool needs_swap) 2803 { 2804 u64 value; 2805 void *ptr = sample->raw_data + field->offset; 2806 2807 switch (field->size) { 2808 case 1: 2809 return *(u8 *)ptr; 2810 case 2: 2811 value = *(u16 *)ptr; 2812 break; 2813 case 4: 2814 value = *(u32 *)ptr; 2815 break; 2816 case 8: 2817 memcpy(&value, ptr, sizeof(u64)); 2818 break; 2819 default: 2820 return 0; 2821 } 2822 2823 if (!needs_swap) 2824 return value; 2825 2826 switch (field->size) { 2827 case 2: 2828 return bswap_16(value); 2829 case 4: 2830 return bswap_32(value); 2831 case 8: 2832 return bswap_64(value); 2833 default: 2834 return 0; 2835 } 2836 2837 return 0; 2838 } 2839 2840 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name) 2841 { 2842 struct tep_format_field *field = evsel__field(evsel, name); 2843 2844 if (!field) 2845 return 0; 2846 2847 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; 2848 } 2849 #endif 2850 2851 bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) 2852 { 2853 int paranoid; 2854 2855 if ((err == ENOENT || err == ENXIO || err == ENODEV) && 2856 evsel->core.attr.type == PERF_TYPE_HARDWARE && 2857 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { 2858 /* 2859 * If it's cycles then fall back to hrtimer based 2860 * cpu-clock-tick sw counter, which is always available even if 2861 * no PMU support. 2862 * 2863 * PPC returns ENXIO until 2.6.37 (behavior changed with commit 2864 * b0a873e). 2865 */ 2866 scnprintf(msg, msgsize, "%s", 2867 "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); 2868 2869 evsel->core.attr.type = PERF_TYPE_SOFTWARE; 2870 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; 2871 2872 zfree(&evsel->name); 2873 return true; 2874 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && 2875 (paranoid = perf_event_paranoid()) > 1) { 2876 const char *name = evsel__name(evsel); 2877 char *new_name; 2878 const char *sep = ":"; 2879 2880 /* If event has exclude user then don't exclude kernel. */ 2881 if (evsel->core.attr.exclude_user) 2882 return false; 2883 2884 /* Is there already the separator in the name. */ 2885 if (strchr(name, '/') || 2886 (strchr(name, ':') && !evsel->is_libpfm_event)) 2887 sep = ""; 2888 2889 if (asprintf(&new_name, "%s%su", name, sep) < 0) 2890 return false; 2891 2892 free(evsel->name); 2893 evsel->name = new_name; 2894 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying " 2895 "to fall back to excluding kernel and hypervisor " 2896 " samples", paranoid); 2897 evsel->core.attr.exclude_kernel = 1; 2898 evsel->core.attr.exclude_hv = 1; 2899 2900 return true; 2901 } 2902 2903 return false; 2904 } 2905 2906 static bool find_process(const char *name) 2907 { 2908 size_t len = strlen(name); 2909 DIR *dir; 2910 struct dirent *d; 2911 int ret = -1; 2912 2913 dir = opendir(procfs__mountpoint()); 2914 if (!dir) 2915 return false; 2916 2917 /* Walk through the directory. */ 2918 while (ret && (d = readdir(dir)) != NULL) { 2919 char path[PATH_MAX]; 2920 char *data; 2921 size_t size; 2922 2923 if ((d->d_type != DT_DIR) || 2924 !strcmp(".", d->d_name) || 2925 !strcmp("..", d->d_name)) 2926 continue; 2927 2928 scnprintf(path, sizeof(path), "%s/%s/comm", 2929 procfs__mountpoint(), d->d_name); 2930 2931 if (filename__read_str(path, &data, &size)) 2932 continue; 2933 2934 ret = strncmp(name, data, len); 2935 free(data); 2936 } 2937 2938 closedir(dir); 2939 return ret ? false : true; 2940 } 2941 2942 static bool is_amd(const char *arch, const char *cpuid) 2943 { 2944 return arch && !strcmp("x86", arch) && cpuid && strstarts(cpuid, "AuthenticAMD"); 2945 } 2946 2947 static bool is_amd_ibs(struct evsel *evsel) 2948 { 2949 return evsel->core.attr.precise_ip 2950 || (evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3)); 2951 } 2952 2953 int evsel__open_strerror(struct evsel *evsel, struct target *target, 2954 int err, char *msg, size_t size) 2955 { 2956 struct perf_env *env = evsel__env(evsel); 2957 const char *arch = perf_env__arch(env); 2958 const char *cpuid = perf_env__cpuid(env); 2959 char sbuf[STRERR_BUFSIZE]; 2960 int printed = 0, enforced = 0; 2961 2962 switch (err) { 2963 case EPERM: 2964 case EACCES: 2965 printed += scnprintf(msg + printed, size - printed, 2966 "Access to performance monitoring and observability operations is limited.\n"); 2967 2968 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) { 2969 if (enforced) { 2970 printed += scnprintf(msg + printed, size - printed, 2971 "Enforced MAC policy settings (SELinux) can limit access to performance\n" 2972 "monitoring and observability operations. Inspect system audit records for\n" 2973 "more perf_event access control information and adjusting the policy.\n"); 2974 } 2975 } 2976 2977 if (err == EPERM) 2978 printed += scnprintf(msg, size, 2979 "No permission to enable %s event.\n\n", evsel__name(evsel)); 2980 2981 return scnprintf(msg + printed, size - printed, 2982 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n" 2983 "access to performance monitoring and observability operations for processes\n" 2984 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n" 2985 "More information can be found at 'Perf events and tool security' document:\n" 2986 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n" 2987 "perf_event_paranoid setting is %d:\n" 2988 " -1: Allow use of (almost) all events by all users\n" 2989 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n" 2990 ">= 0: Disallow raw and ftrace function tracepoint access\n" 2991 ">= 1: Disallow CPU event access\n" 2992 ">= 2: Disallow kernel profiling\n" 2993 "To make the adjusted perf_event_paranoid setting permanent preserve it\n" 2994 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)", 2995 perf_event_paranoid()); 2996 case ENOENT: 2997 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel)); 2998 case EMFILE: 2999 return scnprintf(msg, size, "%s", 3000 "Too many events are opened.\n" 3001 "Probably the maximum number of open file descriptors has been reached.\n" 3002 "Hint: Try again after reducing the number of events.\n" 3003 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 3004 case ENOMEM: 3005 if (evsel__has_callchain(evsel) && 3006 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0) 3007 return scnprintf(msg, size, 3008 "Not enough memory to setup event with callchain.\n" 3009 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" 3010 "Hint: Current value: %d", sysctl__max_stack()); 3011 break; 3012 case ENODEV: 3013 if (target->cpu_list) 3014 return scnprintf(msg, size, "%s", 3015 "No such device - did you specify an out-of-range profile CPU?"); 3016 break; 3017 case EOPNOTSUPP: 3018 if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK) 3019 return scnprintf(msg, size, 3020 "%s: PMU Hardware or event type doesn't support branch stack sampling.", 3021 evsel__name(evsel)); 3022 if (evsel->core.attr.aux_output) 3023 return scnprintf(msg, size, 3024 "%s: PMU Hardware doesn't support 'aux_output' feature", 3025 evsel__name(evsel)); 3026 if (evsel->core.attr.sample_period != 0) 3027 return scnprintf(msg, size, 3028 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'", 3029 evsel__name(evsel)); 3030 if (evsel->core.attr.precise_ip) 3031 return scnprintf(msg, size, "%s", 3032 "\'precise\' request may not be supported. Try removing 'p' modifier."); 3033 #if defined(__i386__) || defined(__x86_64__) 3034 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) 3035 return scnprintf(msg, size, "%s", 3036 "No hardware sampling interrupt available.\n"); 3037 #endif 3038 break; 3039 case EBUSY: 3040 if (find_process("oprofiled")) 3041 return scnprintf(msg, size, 3042 "The PMU counters are busy/taken by another profiler.\n" 3043 "We found oprofile daemon running, please stop it and try again."); 3044 break; 3045 case EINVAL: 3046 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size) 3047 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel."); 3048 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size) 3049 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel."); 3050 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) 3051 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel."); 3052 if (perf_missing_features.clockid) 3053 return scnprintf(msg, size, "clockid feature not supported."); 3054 if (perf_missing_features.clockid_wrong) 3055 return scnprintf(msg, size, "wrong clockid (%d).", clockid); 3056 if (perf_missing_features.aux_output) 3057 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel."); 3058 if (!target__has_cpu(target)) 3059 return scnprintf(msg, size, 3060 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.", 3061 evsel__name(evsel)); 3062 if (is_amd(arch, cpuid)) { 3063 if (is_amd_ibs(evsel)) { 3064 if (evsel->core.attr.exclude_kernel) 3065 return scnprintf(msg, size, 3066 "AMD IBS can't exclude kernel events. Try running at a higher privilege level."); 3067 if (!evsel->core.system_wide) 3068 return scnprintf(msg, size, 3069 "AMD IBS may only be available in system-wide/per-cpu mode. Try using -a, or -C and workload affinity"); 3070 } 3071 } 3072 3073 break; 3074 case ENODATA: 3075 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. " 3076 "Please add an auxiliary event in front of the load latency event."); 3077 default: 3078 break; 3079 } 3080 3081 return scnprintf(msg, size, 3082 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 3083 "/bin/dmesg | grep -i perf may provide additional information.\n", 3084 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel)); 3085 } 3086 3087 struct perf_env *evsel__env(struct evsel *evsel) 3088 { 3089 if (evsel && evsel->evlist && evsel->evlist->env) 3090 return evsel->evlist->env; 3091 return &perf_env; 3092 } 3093 3094 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) 3095 { 3096 int cpu_map_idx, thread; 3097 3098 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) { 3099 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); 3100 thread++) { 3101 int fd = FD(evsel, cpu_map_idx, thread); 3102 3103 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, 3104 cpu_map_idx, thread, fd) < 0) 3105 return -1; 3106 } 3107 } 3108 3109 return 0; 3110 } 3111 3112 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist) 3113 { 3114 struct perf_cpu_map *cpus = evsel->core.cpus; 3115 struct perf_thread_map *threads = evsel->core.threads; 3116 3117 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr)) 3118 return -ENOMEM; 3119 3120 return store_evsel_ids(evsel, evlist); 3121 } 3122 3123 void evsel__zero_per_pkg(struct evsel *evsel) 3124 { 3125 struct hashmap_entry *cur; 3126 size_t bkt; 3127 3128 if (evsel->per_pkg_mask) { 3129 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) 3130 zfree(&cur->pkey); 3131 3132 hashmap__clear(evsel->per_pkg_mask); 3133 } 3134 } 3135 3136 /** 3137 * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this 3138 * will be false on hybrid systems for hardware and legacy 3139 * cache events. 3140 */ 3141 bool evsel__is_hybrid(const struct evsel *evsel) 3142 { 3143 if (!perf_pmus__has_hybrid()) 3144 return false; 3145 3146 return evsel->core.is_pmu_core; 3147 } 3148 3149 struct evsel *evsel__leader(const struct evsel *evsel) 3150 { 3151 return container_of(evsel->core.leader, struct evsel, core); 3152 } 3153 3154 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) 3155 { 3156 return evsel->core.leader == &leader->core; 3157 } 3158 3159 bool evsel__is_leader(struct evsel *evsel) 3160 { 3161 return evsel__has_leader(evsel, evsel); 3162 } 3163 3164 void evsel__set_leader(struct evsel *evsel, struct evsel *leader) 3165 { 3166 evsel->core.leader = &leader->core; 3167 } 3168 3169 int evsel__source_count(const struct evsel *evsel) 3170 { 3171 struct evsel *pos; 3172 int count = 0; 3173 3174 evlist__for_each_entry(evsel->evlist, pos) { 3175 if (pos->metric_leader == evsel) 3176 count++; 3177 } 3178 return count; 3179 } 3180 3181 bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused) 3182 { 3183 return false; 3184 } 3185 3186 /* 3187 * Remove an event from a given group (leader). 3188 * Some events, e.g., perf metrics Topdown events, 3189 * must always be grouped. Ignore the events. 3190 */ 3191 void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader) 3192 { 3193 if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) { 3194 evsel__set_leader(evsel, evsel); 3195 evsel->core.nr_members = 0; 3196 leader->core.nr_members--; 3197 } 3198 } 3199