// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "util/hashmap.h"
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>

#include <linux/ctype.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = {
	NULL,
	"duration_time",
	"user_time",
	"system_time",
};

const char *perf_tool_event__to_str(enum perf_tool_event ev)
{
	if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX)
		return perf_tool_event__tool_names[ev];

	return NULL;
}

enum perf_tool_event perf_tool_event__from_str(const char *str)
{
	int i;

	perf_tool_event__for_each_event(i) {
		if (!strcmp(str, perf_tool_event__tool_names[i]))
			return i;
	}
	return PERF_TOOL_NONE;
}

static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = evsel__no_extra_init,
	.fini = evsel__no_extra_fini,
};

int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
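/*
 * For example, sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME has three bits set inside PERF_SAMPLE_MASK, so the
 * fixed-size part of such a sample occupies 3 * sizeof(u64) = 24 bytes.
 */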
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
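/*
 * Example: with sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID, the id is the third u64 of a sample (id_pos == 2), and in
 * a non-sample event it is the last u64 of the appended id sample
 * (is_pos == 1); each of PERF_SAMPLE_CPU and PERF_SAMPLE_STREAM_ID, when
 * set, pushes it one position further from the end.
 */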
void evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __evsel__set_sample_bit(struct evsel *evsel,
			     enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void __evsel__reset_sample_bit(struct evsel *evsel,
			       enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void evsel__set_sample_id(struct evsel *evsel,
			  bool can_sample_identifier)
{
	if (can_sample_identifier) {
		evsel__reset_sample_bit(evsel, ID);
		evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr, idx);
	evsel->tracking = !idx;
	evsel->unit = strdup("");
	evsel->scale = 1.0;
	evsel->max_events = ULONG_MAX;
	evsel->evlist = NULL;
	evsel->bpf_obj = NULL;
	evsel->bpf_fd = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	INIT_LIST_HEAD(&evsel->bpf_counter_list);
	INIT_LIST_HEAD(&evsel->bpf_filters);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __evsel__sample_size(attr->sample_type);
	evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_events = NULL;
	evsel->per_pkg_mask = NULL;
	evsel->collect_stat = false;
	evsel->pmu_name = NULL;
	evsel->group_pmu_name = NULL;
	evsel->skippable = false;
}

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
		evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (evsel__is_clock(evsel)) {
		free((char *)evsel->unit);
		evsel->unit = strdup("msec");
		evsel->scale = 1e-6;
	}

	return evsel;
}

int copy_config_terms(struct list_head *dst, struct list_head *src)
{
	struct evsel_config_term *pos, *tmp;

	list_for_each_entry(pos, src, list) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return -ENOMEM;

		*tmp = *pos;
		if (tmp->free_str) {
			tmp->val.str = strdup(pos->val.str);
			if (tmp->val.str == NULL) {
				free(tmp);
				return -ENOMEM;
			}
		}
		list_add_tail(&tmp->list, dst);
	}
	return 0;
}

static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
	return copy_config_terms(&dst->config_terms, &src->config_terms);
}

/**
 * evsel__clone - create a new evsel copied from @orig
 * @orig: original evsel
 *
 * The assumption is that @orig is neither configured nor opened yet, so we
 * only care about the attributes that can be set while it is parsed.
 */
struct evsel *evsel__clone(struct evsel *orig)
{
	struct evsel *evsel;

	BUG_ON(orig->core.fd);
	BUG_ON(orig->counts);
	BUG_ON(orig->priv);
	BUG_ON(orig->per_pkg_mask);

	/* cannot handle BPF objects for now */
	if (orig->bpf_obj)
		return NULL;

	evsel = evsel__new(&orig->core.attr);
	if (evsel == NULL)
		return NULL;

	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
	evsel->core.threads = perf_thread_map__get(orig->core.threads);
	evsel->core.nr_members = orig->core.nr_members;
	evsel->core.system_wide = orig->core.system_wide;
	evsel->core.requires_cpu = orig->core.requires_cpu;
	evsel->core.is_pmu_core = orig->core.is_pmu_core;

	if (orig->name) {
		evsel->name = strdup(orig->name);
		if (evsel->name == NULL)
			goto out_err;
	}
	if (orig->group_name) {
		evsel->group_name = strdup(orig->group_name);
		if (evsel->group_name == NULL)
			goto out_err;
	}
	if (orig->pmu_name) {
		evsel->pmu_name = strdup(orig->pmu_name);
		if (evsel->pmu_name == NULL)
			goto out_err;
	}
	if (orig->group_pmu_name) {
		evsel->group_pmu_name = strdup(orig->group_pmu_name);
		if (evsel->group_pmu_name == NULL)
			goto out_err;
	}
	if (orig->filter) {
		evsel->filter = strdup(orig->filter);
		if (evsel->filter == NULL)
			goto out_err;
	}
	if (orig->metric_id) {
		evsel->metric_id = strdup(orig->metric_id);
		if (evsel->metric_id == NULL)
			goto out_err;
	}
	evsel->cgrp = cgroup__get(orig->cgrp);
#ifdef HAVE_LIBTRACEEVENT
	evsel->tp_format = orig->tp_format;
#endif
	evsel->handler = orig->handler;
	evsel->core.leader = orig->core.leader;

	evsel->max_events = orig->max_events;
	evsel->tool_event = orig->tool_event;
	free((char *)evsel->unit);
	evsel->unit = strdup(orig->unit);
	if (evsel->unit == NULL)
		goto out_err;

	evsel->scale = orig->scale;
	evsel->snapshot = orig->snapshot;
	evsel->per_pkg = orig->per_pkg;
	evsel->percore = orig->percore;
	evsel->precise_max = orig->precise_max;
	evsel->is_libpfm_event = orig->is_libpfm_event;

	evsel->exclude_GH = orig->exclude_GH;
	evsel->sample_read = orig->sample_read;
	evsel->auto_merge_stats = orig->auto_merge_stats;
	evsel->collect_stat = orig->collect_stat;
	evsel->weak_group = orig->weak_group;
	evsel->use_config_name = orig->use_config_name;
	evsel->pmu = orig->pmu;

	if (evsel__copy_config_terms(evsel, orig) < 0)
		goto out_err;

	return evsel;

out_err:
	evsel__delete(evsel);
	return NULL;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
#endif

const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

char *evsel__bpf_counter_events;

bool evsel__match_bpf_counter_events(const char *name)
{
	int name_len;
	bool match;
	char *ptr;

	if (!evsel__bpf_counter_events)
		return false;

	ptr = strstr(evsel__bpf_counter_events, name);
	name_len = strlen(name);

	/* check name matches a full token in evsel__bpf_counter_events */
	match = (ptr != NULL) &&
		((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
		((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));

	return match;
}

/*
 * Example: with evsel__bpf_counter_events = "instructions,cycles",
 * "cycles" matches as a full token, while "cycle" and "cpu-cycles" do not.
 */
static const char *__evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
		return evsel__hw_names[config];

	return "unknown-hardware";
}

static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
}

static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = arch_evsel__hw_name(evsel, bf, size);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
		return evsel__sw_names[config];
	return "unknown-software";
}

static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size)
{
	return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev));
}

static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",		"bpc",	},
	{ "node",								},
};

const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
				 evsel__hw_cache_op[op][0],
				 evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
			 evsel__hw_cache_op[op][1]);
}

/*
 * A hardware cache event encodes attr.config as three bytes, decoded below:
 * e.g. PERF_COUNT_HW_CACHE_L1D (0), PERF_COUNT_HW_CACHE_OP_READ (0 << 8) and
 * PERF_COUNT_HW_CACHE_RESULT_MISS (1 << 16) give config == 0x10000, i.e.
 * "L1-dcache-load-misses".
 */
static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >> 0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >> 8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel__is_tool(evsel))
			evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
		else
			evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

bool evsel__name_is(struct evsel *evsel, const char *name)
{
	return !strcmp(evsel__name(evsel), name);
}

const char *evsel__metric_id(const struct evsel *evsel)
{
	if (evsel->metric_id)
		return evsel->metric_id;

	if (evsel__is_tool(evsel))
		return perf_tool_event__to_str(evsel->tool_event);

	return "unknown";
}

const char *evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
				      struct callchain_param *param)
{
	bool function = evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							   PERF_SAMPLE_BRANCH_CALL_STACK |
							   PERF_SAMPLE_BRANCH_NO_CYCLES |
							   PERF_SAMPLE_BRANCH_NO_FLAGS |
							   PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			evsel__set_sample_bit(evsel, REGS_USER);
			evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= arch__user_reg_mask();
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *param)
{
	if (param->enabled)
		return __evsel__config_callchain(evsel, opts, param);
}

static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		evsel__reset_sample_bit(evsel, REGS_USER);
		evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
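/*
 * The terms applied below come from the event parser, e.g.
 * "-e cycles/period=100000,call-graph=dwarf/" queues an
 * EVSEL__CONFIG_TERM_PERIOD and an EVSEL__CONFIG_TERM_CALLGRAPH entry on
 * evsel->config_terms.
 */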
static void evsel__apply_config_terms(struct evsel *evsel,
				      struct record_opts *opts, bool track)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				evsel__set_sample_bit(evsel, TIME);
			else
				evsel__reset_sample_bit(evsel, TIME);
			break;
		case EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opts->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case EVSEL__CONFIG_TERM_PERCORE:
			break;
		case EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				evsel__set_sample_bit(evsel, ADDR);
				evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			evsel__config_callchain(evsel, opts, &param);
		}
	}
}

struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
	struct evsel_config_term *term, *found_term = NULL;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == type)
			found_term = term;
	}

	return found_term;
}

void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT);
}

void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
				    struct perf_event_attr *attr __maybe_unused)
{
}

static void evsel__set_default_freq_period(struct record_opts *opts,
					   struct perf_event_attr *attr)
{
	if (opts->freq) {
		attr->freq = 1;
		attr->sample_freq = opts->freq;
	} else {
		attr->sample_period = opts->default_interval;
	}
}

static bool evsel__is_offcpu_event(struct evsel *evsel)
{
	return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *   - all independent events and group leaders are disabled
 *   - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *   - all independent events and group leaders have
 *     enable_on_exec set
 *   - we don't specifically enable or disable any event during
 *     the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *   - we specifically enable or disable all events during
 *     the record command
 *
 *     When attaching events to an already running traced program,
 *     we enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain)
{
	struct evsel *leader = evsel__leader(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;
	attr->read_format   = PERF_FORMAT_LOST;

	evsel__set_sample_bit(evsel, IP);
	evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if ((evsel->is_libpfm_event && !attr->sample_period) ||
	    (!evsel->is_libpfm_event && (!attr->sample_period ||
					 opts->user_freq != UINT_MAX ||
					 opts->user_interval != ULLONG_MAX)))
		evsel__set_default_freq_period(opts, attr);

	/*
	 * If attr->freq was set (here or earlier), ask for period
	 * to be sampled.
	 */
	if (attr->freq)
		evsel__set_sample_bit(evsel, PERIOD);

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the
	 * page fault handler and their overall tricky nature.
	 */
	if (evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_user |= opts->sample_user_regs;
		evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, TIME);
		evsel__set_sample_bit(evsel, RAW);
		evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		arch_evsel__set_sample_weight(evsel);

	attr->task     = track;
	attr->mmap     = track;
	attr->mmap2    = track && !perf_missing_features.mmap2;
	attr->comm     = track;
	attr->build_id = track && opts->build_id;

	/*
	 * ksymbol is tracked separately with text poke because it needs to be
	 * system wide and enabled immediately.
	 */
	if (!opts->text_poke)
		attr->ksymbol = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_cgroup) {
		attr->cgroup = track && !perf_missing_features.cgroup;
		evsel__set_sample_bit(evsel, CGROUP);
	}

	if (opts->sample_data_page_size)
		evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);

	if (opts->sample_code_page_size)
		evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
	    !opts->target.initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; they override any global
	 * configuration.
	 */
	evsel__apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			evsel__set_sample_bit(evsel, PERIOD);
		else
			evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * A dummy event never triggers any actual counter and therefore
	 * cannot be used with branch_stack.
	 *
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will trigger -EOPNOTSUPP error out,
	 * if BRANCH_STACK bit is set.
	 */
	if (evsel__is_dummy_event(evsel))
		evsel__reset_sample_bit(evsel, BRANCH_STACK);

	if (evsel__is_offcpu_event(evsel))
		evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;

	arch__post_evsel_config(evsel, attr);
}
int evsel__set_filter(struct evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "%s,%s", filter);
}

/* Caller has to clear disabled after going through all CPUs. */
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__enable(struct evsel *evsel)
{
	int err = perf_evsel__enable(&evsel->core);

	if (!err)
		evsel->disabled = false;
	return err;
}

/* Caller has to set disabled after going through all CPUs. */
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__disable(struct evsel *evsel)
{
	int err = perf_evsel__disable(&evsel->core);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may have
	 * already a few more events queued up before the kernel got the stop
	 * request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

void free_config_terms(struct list_head *config_terms)
{
	struct evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, config_terms, list) {
		list_del_init(&term->list);
		if (term->free_str)
			zfree(&term->val.str);
		free(term);
	}
}

static void evsel__free_config_terms(struct evsel *evsel)
{
	free_config_terms(&evsel->config_terms);
}

void evsel__exit(struct evsel *evsel)
{
	assert(list_empty(&evsel->core.node));
	assert(evsel->evlist == NULL);
	bpf_counter__destroy(evsel);
	perf_bpf_filter__destroy(evsel);
	evsel__free_counts(evsel);
	perf_evsel__free_fd(&evsel->core);
	perf_evsel__free_id(&evsel->core);
	evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	perf_cpu_map__put(evsel->core.cpus);
	perf_cpu_map__put(evsel->core.own_cpus);
	perf_thread_map__put(evsel->core.threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	zfree(&evsel->filter);
	zfree(&evsel->pmu_name);
	zfree(&evsel->group_pmu_name);
	zfree(&evsel->unit);
	zfree(&evsel->metric_id);
	evsel__zero_per_pkg(evsel);
	hashmap__free(evsel->per_pkg_mask);
	evsel->per_pkg_mask = NULL;
	zfree(&evsel->metric_events);
	perf_evsel__object.fini(evsel);
}

void evsel__delete(struct evsel *evsel)
{
	if (!evsel)
		return;

	evsel__exit(evsel);
	free(evsel);
}

void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
			   struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
	*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);

	return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}

static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
			     u64 val, u64 ena, u64 run, u64 lost)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu_map_idx, thread);

	count->val  = val;
	count->ena  = ena;
	count->run  = run;
	count->lost = lost;

	perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}

static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
	u64 read_format = leader->core.attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, lost = 0;

	nr = *data++;

	if (nr != (u64) leader->core.nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (void *)data;
	sample_read_group__for_each(v, nr, read_format) {
		struct evsel *counter;

		counter = evlist__id2evsel(leader->evlist, v->id);
		if (!counter)
			return -EINVAL;

		if (read_format & PERF_FORMAT_LOST)
			lost = v->lost;

		evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
	}

	return 0;
}
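/*
 * With PERF_FORMAT_GROUP a single read() returns the whole group, roughly:
 *   { u64 nr; u64 time_enabled; u64 time_running;
 *     { u64 value; u64 id; u64 lost; } cnt[nr]; }
 * where time_enabled/time_running/id/lost are only present when the
 * corresponding read_format bits are set; evsel__process_group_data()
 * above walks that layout.
 */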
static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->core.attr.read_format;
	int size = perf_evsel__read_size(&leader->core);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu_map_idx, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
		return -errno;

	return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
	u64 read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return evsel__read_group(evsel, cpu_map_idx, thread);

	return evsel__read_one(evsel, cpu_map_idx, thread);
}

int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu_map_idx, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu_map_idx, thread) = count;
	return 0;
}

static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
				  int cpu_map_idx)
{
	struct perf_cpu cpu;

	cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
	return perf_cpu_map__idx(other->core.cpus, cpu);
}

static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
{
	struct evsel *leader = evsel__leader(evsel);

	if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
	    (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
		return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
	}

	return cpu_map_idx;
}

static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
{
	struct evsel *leader = evsel__leader(evsel);
	int fd;

	if (evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->core.fd);

	cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
	if (cpu_map_idx == -1)
		return -1;

	fd = FD(leader, cpu_map_idx, thread);
	BUG_ON(fd == -1 && !leader->skippable);

	/*
	 * When the leader has been skipped, return -2 to distinguish from no
	 * group leader case.
	 */
	return fd == -1 ? -2 : fd;
}

/*
 * Shift the fds after the removed thread one slot to the left, e.g. removing
 * thread_idx 1 of 3 copies FD(pos, cpu, 2) into FD(pos, cpu, 1) on every cpu.
 */
static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct evsel *evsel,
		      int nr_cpus, int cpu_map_idx,
		      int nr_threads, int thread_idx)
{
	struct evsel *pos;

	if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;

		evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since the fds for the next evsel have not been created yet,
		 * there is no need to iterate over the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}

static bool evsel__ignore_missing_thread(struct evsel *evsel,
					 int nr_cpus, int cpu_map_idx,
					 struct perf_thread_map *threads,
					 int thread, int err)
{
	pid_t ignore_pid = perf_thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->core.system_wide)
		return false;

	/* -ESRCH is the perf event syscall errno for pids that are not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We should remove the fd for the missing thread first,
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

static void display_attr(struct perf_event_attr *attr)
{
	if (verbose >= 2 || debug_peo_args) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
}

bool evsel__precise_ip_fallback(struct evsel *evsel)
{
	/* Do not try less precise if not requested. */
	if (!evsel->precise_max)
		return false;

	/*
	 * We tried all the precise_ip values, and it's
	 * still failing, so leave it to standard fallback.
	 */
	if (!evsel->core.attr.precise_ip) {
		evsel->core.attr.precise_ip = evsel->precise_ip_original;
		return false;
	}

	if (!evsel->precise_ip_original)
		evsel->precise_ip_original = evsel->core.attr.precise_ip;

	evsel->core.attr.precise_ip--;
	pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
	display_attr(&evsel->core.attr);
	return true;
}

static struct perf_cpu_map *empty_cpu_map;
static struct perf_thread_map *empty_thread_map;

static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
				 struct perf_thread_map *threads)
{
	int nthreads = perf_thread_map__nr(threads);

	if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
	    (perf_missing_features.aux_output && evsel->core.attr.aux_output))
		return -EINVAL;

	if (cpus == NULL) {
		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->core.fd == NULL &&
	    perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
		return -ENOMEM;

	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
	if (evsel->cgrp)
		evsel->open_flags |= PERF_FLAG_PID_CGROUP;

	return 0;
}

static void evsel__disable_missing_features(struct evsel *evsel)
{
	if (perf_missing_features.read_lost)
		evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
	if (perf_missing_features.weight_struct) {
		evsel__set_sample_bit(evsel, WEIGHT);
		evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
	}
	if (perf_missing_features.clockid_wrong)
		evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->core.attr.use_clockid = 0;
		evsel->core.attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->core.attr.mmap2 = 0;
	if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
		evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
							 PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->core.attr.inherit)
		evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
	if (perf_missing_features.ksymbol)
		evsel->core.attr.ksymbol = 0;
	if (perf_missing_features.bpf)
		evsel->core.attr.bpf_event = 0;
	if (perf_missing_features.branch_hw_idx)
		evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
	if (perf_missing_features.sample_id_all)
		evsel->core.attr.sample_id_all = 0;
}

int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
			struct perf_thread_map *threads)
{
	int err;

	err = __evsel__prepare_open(evsel, cpus, threads);
	if (err)
		return err;

	evsel__disable_missing_features(evsel);

	return err;
}
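/*
 * Probe for a perf_event_attr feature the running kernel does not support.
 * Returns true when a feature was switched off, so the caller should retry
 * the failed sys_perf_event_open() (see evsel__disable_missing_features());
 * returns false when the failure cannot be worked around this way.
 */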
bool evsel__detect_missing_features(struct evsel *evsel)
{
	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.read_lost &&
	    (evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
		perf_missing_features.read_lost = true;
		pr_debug2("switching off PERF_FORMAT_LOST support\n");
		return true;
	} else if (!perf_missing_features.weight_struct &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
		perf_missing_features.weight_struct = true;
		pr_debug2("switching off weight struct support\n");
		return true;
	} else if (!perf_missing_features.code_page_size &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
		perf_missing_features.code_page_size = true;
		pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
		return false;
	} else if (!perf_missing_features.data_page_size &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
		perf_missing_features.data_page_size = true;
		pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
		return false;
	} else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
		perf_missing_features.cgroup = true;
		pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
		return false;
	} else if (!perf_missing_features.branch_hw_idx &&
	    (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
		perf_missing_features.branch_hw_idx = true;
		pr_debug2("switching off branch HW index support\n");
		return true;
	} else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
		perf_missing_features.aux_output = true;
		pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
		return false;
	} else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
		perf_missing_features.bpf = true;
		pr_debug2_peo("switching off bpf_event\n");
		return true;
	} else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
		perf_missing_features.ksymbol = true;
		pr_debug2_peo("switching off ksymbol\n");
		return true;
	} else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
		perf_missing_features.write_backward = true;
		pr_debug2_peo("switching off write_backward\n");
		return false;
	} else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		pr_debug2_peo("switching off clockid\n");
		return true;
	} else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
		perf_missing_features.clockid = true;
		pr_debug2_peo("switching off use_clockid\n");
		return true;
	} else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		pr_debug2_peo("switching off cloexec flag\n");
		return true;
	} else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
		perf_missing_features.mmap2 = true;
		pr_debug2_peo("switching off mmap2\n");
		return true;
	} else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) {
		if (evsel->pmu == NULL)
			evsel->pmu = evsel__find_pmu(evsel);

		if (evsel->pmu)
			evsel->pmu->missing_features.exclude_guest = true;
		else {
			/* we cannot find PMU, disable attrs now */
			evsel->core.attr.exclude_host = false;
			evsel->core.attr.exclude_guest = false;
		}

		if (evsel->exclude_GH) {
			pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
			return false;
		}
		if (!perf_missing_features.exclude_guest) {
			perf_missing_features.exclude_guest = true;
			pr_debug2_peo("switching off exclude_guest, exclude_host\n");
		}
		return true;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		pr_debug2_peo("switching off sample_id_all\n");
		return true;
	} else if (!perf_missing_features.lbr_flags &&
		   (evsel->core.attr.branch_sample_type &
		    (PERF_SAMPLE_BRANCH_NO_CYCLES |
		     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
		return true;
	} else if (!perf_missing_features.group_read &&
		   evsel->core.attr.inherit &&
		   (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
		   evsel__is_group_leader(evsel)) {
		perf_missing_features.group_read = true;
		pr_debug2_peo("switching off group read\n");
		return true;
	} else {
		return false;
	}
}
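/*
 * Raise RLIMIT_NOFILE step by step: the first call lifts the soft limit to
 * the hard limit, subsequent calls grow both by 1000; *set_rlimit tracks how
 * far we have gone (from NO_CHANGE up to INCREASED_MAX, see enum
 * rlimit_action).
 */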
			evsel->core.attr.exclude_host = false;
			evsel->core.attr.exclude_guest = false;
		}

		if (evsel->exclude_GH) {
			pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
			return false;
		}
		if (!perf_missing_features.exclude_guest) {
			perf_missing_features.exclude_guest = true;
			pr_debug2_peo("switching off exclude_guest, exclude_host\n");
		}
		return true;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		pr_debug2_peo("switching off sample_id_all\n");
		return true;
	} else if (!perf_missing_features.lbr_flags &&
			(evsel->core.attr.branch_sample_type &
			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
		return true;
	} else if (!perf_missing_features.group_read &&
		   evsel->core.attr.inherit &&
		   (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
		   evsel__is_group_leader(evsel)) {
		perf_missing_features.group_read = true;
		pr_debug2_peo("switching off group read\n");
		return true;
	} else {
		return false;
	}
}

bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
{
	int old_errno;
	struct rlimit l;

	if (*set_rlimit < INCREASED_MAX) {
		old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (*set_rlimit == NO_CHANGE) {
				l.rlim_cur = l.rlim_max;
			} else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				(*set_rlimit) += 1;
				errno = old_errno;
				return true;
			}
		}
		errno = old_errno;
	}

	return false;
}

static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads,
			   int start_cpu_map_idx, int end_cpu_map_idx)
{
	int idx, thread, nthreads;
	int pid = -1, err, old_errno;
	enum rlimit_action set_rlimit = NO_CHANGE;

	err = __evsel__prepare_open(evsel, cpus, threads);
	if (err)
		return err;

	if (cpus == NULL)
		cpus = empty_cpu_map;

	if (threads == NULL)
		threads = empty_thread_map;

	nthreads = perf_thread_map__nr(threads);

	if (evsel->cgrp)
		pid = evsel->cgrp->fd;

fallback_missing_features:
	evsel__disable_missing_features(evsel);

	pr_debug3("Opening: %s\n", evsel__name(evsel));
	display_attr(&evsel->core.attr);

	for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;
retry_open:
			if (thread >= nthreads)
				break;

			if (!evsel->cgrp && !evsel->core.system_wide)
				pid = perf_thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, idx, thread);

			if (group_fd == -2) {
				pr_debug("broken group leader for %s\n", evsel->name);
				err = -EINVAL;
				goto out_close;
			}

			test_attr__ready();

			/* Debug message used by test scripts */
			pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
				      pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);

			fd = sys_perf_event_open(&evsel->core.attr, pid,
						 perf_cpu_map__cpu(cpus, idx).cpu,
						 group_fd, evsel->open_flags);

			FD(evsel, idx, thread) = fd;
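
			/*
			 * Note: the fd is stored even on failure; the cleanup
			 * loop at out_close only close()s entries that are
			 * still >= 0.
			 */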
			if (fd < 0) {
				err = -errno;

				pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
					      err);
				goto try_fallback;
			}

			bpf_counter__install_pe(evsel, idx, fd);

			if (unlikely(test_attr__enabled)) {
				test_attr__open(&evsel->core.attr, pid,
						perf_cpu_map__cpu(cpus, idx),
						fd, group_fd, evsel->open_flags);
			}

			/* Debug message used by test scripts */
			pr_debug2_peo(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have evsel__open_strerror() print us a nice error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	if (evsel__precise_ip_fallback(evsel))
		goto retry_open;

	if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
					 idx, threads, thread, err)) {
		/* We just removed 1 thread, so lower the upper nthreads limit. */
		nthreads--;

		/* ... and pretend like nothing has happened. */
		err = 0;
		goto retry_open;
	}
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
		goto retry_open;

	if (err != -EINVAL || idx > 0 || thread > 0)
		goto out_close;

	if (evsel__detect_missing_features(evsel))
		goto fallback_missing_features;
out_close:
	if (err)
		threads->err_thread = thread;

	old_errno = errno;
	do {
		while (--thread >= 0) {
			if (FD(evsel, idx, thread) >= 0)
				close(FD(evsel, idx, thread));
			FD(evsel, idx, thread) = -1;
		}
		thread = nthreads;
	} while (--idx >= 0);
	errno = old_errno;
	return err;
}

int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads)
{
	return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
}

void evsel__close(struct evsel *evsel)
{
	perf_evsel__close(&evsel->core);
	perf_evsel__free_id(&evsel->core);
}

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
{
	if (cpu_map_idx == -1)
		return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));

	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
}

int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
{
	return evsel__open(evsel, NULL, threads);
}

static int perf_evsel__parse_id_sample(const struct evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	const __u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}
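
	/*
	 * The remaining fields are consumed back to front, i.e. in the
	 * reverse of the order the kernel appends them (cf. the layout
	 * mirrored by evsel__id_hdr_size() below).
	 */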
	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	return 0;
}

void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
					  const __u64 *array,
					  u64 type __maybe_unused)
{
	data->weight = *array;
}

u64 evsel__bitfield_swap_branch_flags(u64 value)
{
	u64 new_val = 0;

	/*
	 * branch_flags
	 * union {
	 *	u64 values;
	 *	struct {
	 *		mispred:1	//target mispredicted
	 *		predicted:1	//target predicted
	 *		in_tx:1		//in transaction
	 *		abort:1		//transaction abort
	 *		cycles:16	//cycle count to last branch
	 *		type:4		//branch type
	 *		spec:2		//branch speculation info
	 *		new_type:4	//additional branch type
	 *		priv:3		//privilege level
	 *		reserved:31
	 *	}
	 * }
	 *
	 * Avoid calling bswap64() on the entire branch_flags value, as
	 * its bit-field sizes vary. Instead, the bitfield_swap() macro
	 * takes each bit-field's position/size and swaps it according
	 * to the host endianness.
	 */
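	/*
	 * Worked example, assuming bitfield_swap(src, pos, size) (defined
	 * earlier in this file) mirrors a size-bit field at bit pos across
	 * the 64-bit word: the 16-bit cycles field at bits 4..19 lands at
	 * bits 63-19..63-4 = 44..59, which is exactly where the opposite
	 * branch below reads it from.
	 */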
	if (host_is_bigendian()) {
		new_val = bitfield_swap(value, 0, 1);
		new_val |= bitfield_swap(value, 1, 1);
		new_val |= bitfield_swap(value, 2, 1);
		new_val |= bitfield_swap(value, 3, 1);
		new_val |= bitfield_swap(value, 4, 16);
		new_val |= bitfield_swap(value, 20, 4);
		new_val |= bitfield_swap(value, 24, 2);
		new_val |= bitfield_swap(value, 26, 4);
		new_val |= bitfield_swap(value, 30, 3);
		new_val |= bitfield_swap(value, 33, 31);
	} else {
		new_val = bitfield_swap(value, 63, 1);
		new_val |= bitfield_swap(value, 62, 1);
		new_val |= bitfield_swap(value, 61, 1);
		new_val |= bitfield_swap(value, 60, 1);
		new_val |= bitfield_swap(value, 44, 16);
		new_val |= bitfield_swap(value, 40, 4);
		new_val |= bitfield_swap(value, 38, 2);
		new_val |= bitfield_swap(value, 34, 4);
		new_val |= bitfield_swap(value, 31, 3);
		new_val |= bitfield_swap(value, 0, 31);
	}

	return new_val;
}

int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *data)
{
	u64 type = evsel->core.attr.sample_type;
	bool swapped = evsel->needs_swap;
	const __u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->core.attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	data->misc = event->header.misc;
	data->id = -1ULL;
	data->data_src = PERF_MEM_DATA_SRC_NONE;
	data->vcpu = -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->core.attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->core.attr.read_format;
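
		/*
		 * Expected layout, per perf_event_open(2): with
		 * PERF_FORMAT_GROUP the kernel emits
		 * { nr, time_enabled?, time_running?, { value, id, lost? }[nr] },
		 * otherwise { value, time_enabled?, time_running?, id, lost? }.
		 */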
		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
						 sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;

			sz = data->read.group.nr * sample_read_value_size(read_format);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;

			if (read_format & PERF_FORMAT_LOST) {
				OVERFLOW_CHECK_u64(array);
				data->read.one.lost = *array;
				array++;
			}
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;

		/*
		 * Undo swap of u64, then swap on individual u32s,
		 * get the size of the raw area and undo all of the
		 * swap. The pevent interface handles endianness by
		 * itself.
		 */
		if (swapped) {
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);
		struct branch_entry *e;
		unsigned int i;

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		if (evsel__has_branch_hw_idx(evsel)) {
			sz += sizeof(u64);
			e = &data->branch_stack->entries[0];
		} else {
			data->no_hw_idx = true;
			/*
			 * If PERF_SAMPLE_BRANCH_HW_INDEX is not set, the
			 * kernel outputs only nr and entries[].
			 */
			e = (struct branch_entry *)&data->branch_stack->hw_idx;
		}

		if (swapped) {
			/*
			 * struct branch_flag does not have endian
			 * specific bit field definition. And bswap
			 * will not resolve the issue, since these
			 * are bit fields.
			 *
			 * evsel__bitfield_swap_branch_flags() uses a
			 * bitfield_swap macro to swap the bit position
			 * based on the host endianness.
			 */
			for (i = 0; i < data->branch_stack->nr; i++, e++)
				e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
		}

		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->core.attr.sample_regs_user;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
		OVERFLOW_CHECK_u64(array);
		arch_perf_parse_sample_weight(data, array, type);
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->core.attr.sample_regs_intr;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	data->cgroup = 0;
	if (type & PERF_SAMPLE_CGROUP) {
		data->cgroup = *array;
		array++;
	}

	data->data_page_size = 0;
	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
		data->data_page_size = *array;
		array++;
	}

	data->code_page_size = 0;
	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
		data->code_page_size = *array;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		OVERFLOW_CHECK(array, sz, max_size);
		/* Undo swap of data */
		if (swapped)
			mem_bswap_64((char *)array, sz);
		data->aux_sample.size = sz;
		data->aux_sample.data = (char *)array;
		array = (void *)array + sz;
	}

	return 0;
}

int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
				  u64 *timestamp)
{
	u64 type = evsel->core.attr.sample_type;
	const __u64 *array;

	if (!(type & PERF_SAMPLE_TIME))
		return -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		struct perf_sample data = {
			.time = -1ULL,
		};
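
		/*
		 * Non-sample events carry the timestamp in the trailing
		 * sample_id block, which perf_evsel__parse_id_sample()
		 * walks backwards from the end of the event.
		 */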
		if (!evsel->core.attr.sample_id_all)
			return -1;
		if (perf_evsel__parse_id_sample(evsel, event, &data))
			return -1;

		*timestamp = data.time;
		return 0;
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER)
		array++;

	if (type & PERF_SAMPLE_IP)
		array++;

	if (type & PERF_SAMPLE_TID)
		array++;

	if (type & PERF_SAMPLE_TIME)
		*timestamp = *array;

	return 0;
}

u16 evsel__id_hdr_size(struct evsel *evsel)
{
	u64 sample_type = evsel->core.attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(u64);

	return size;
}

#ifdef HAVE_LIBTRACEEVENT
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}

void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	struct tep_format_field *field = evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
		if (tep_field_is_relative(field->flags))
			offset += field->offset + field->size;
	}

	return sample->raw_data + offset;
}

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	struct tep_format_field *field = evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
#endif

bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->core.attr.type == PERF_TYPE_HARDWARE &&
	    evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to the hrtimer based
		 * cpu-clock-tick sw counter, which is always available
		 * even when there is no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with
		 * commit b0a873e).
		 */
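		/*
		 * The attr is rewritten below to the software clock; the
		 * cached name is zfree'd so that evsel__name() regenerates
		 * it for the new type/config.
		 */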
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
		evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* If event has exclude user then don't exclude kernel. */
		if (evsel->core.attr.exclude_user)
			return false;

		/* Is there already the separator in the name. */
		if (strchr(name, '/') ||
		    (strchr(name, ':') && !evsel->is_libpfm_event))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
			  "to fall back to excluding kernel and hypervisor "
			  "samples", paranoid);
		evsel->core.attr.exclude_kernel = 1;
		evsel->core.attr.exclude_hv = 1;

		return true;
	}

	return false;
}

static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		    !strcmp(".", d->d_name) ||
		    !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}

int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
				     char *msg __maybe_unused,
				     size_t size __maybe_unused)
{
	return 0;
}

int evsel__open_strerror(struct evsel *evsel, struct target *target,
			 int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0, enforced = 0;
	int ret;

	switch (err) {
	case EPERM:
	case EACCES:
		printed += scnprintf(msg + printed, size - printed,
			"Access to performance monitoring and observability operations is limited.\n");

		if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
			if (enforced) {
				printed += scnprintf(msg + printed, size - printed,
					"Enforced MAC policy settings (SELinux) can limit access to performance\n"
					"monitoring and observability operations. Inspect system audit records for\n"
					"more perf_event access control information and adjust the policy accordingly.\n");
			}
		}
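
		/*
		 * Even without MAC enforcement, EPERM/EACCES here usually
		 * means the perf_event_paranoid level explained below
		 * forbids the request for an unprivileged user.
		 */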
		if (err == EPERM)
			printed += scnprintf(msg, size,
				"No permission to enable %s event.\n\n", evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
		 "access to performance monitoring and observability operations for processes\n"
		 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
		 "More information can be found at 'Perf events and tool security' document:\n"
		 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
		 "perf_event_paranoid setting is %d:\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow raw and ftrace function tracepoint access\n"
		 ">= 1: Disallow CPU event access\n"
		 ">= 2: Disallow kernel profiling\n"
		 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
		 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
		 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
			return scnprintf(msg, size,
	 "%s: PMU Hardware or event type doesn't support branch stack sampling.",
					 evsel__name(evsel));
		if (evsel->core.attr.aux_output)
			return scnprintf(msg, size,
	 "%s: PMU Hardware doesn't support 'aux_output' feature",
					 evsel__name(evsel));
		if (evsel->core.attr.sample_period != 0)
			return scnprintf(msg, size,
	 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 evsel__name(evsel));
		if (evsel->core.attr.precise_ip)
			return scnprintf(msg, size, "%s",
	 "'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	 "No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	 "The PMU counters are busy/taken by another profiler.\n"
	 "We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
			return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
		if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
			return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		if (perf_missing_features.aux_output)
			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
		if (!target__has_cpu(target))
			return scnprintf(msg, size,
	 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
					 evsel__name(evsel));

		break;
	case ENODATA:
		return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
				 "Please add an auxiliary event in front of the load latency event.");
	default:
		break;
	}

	ret = arch_evsel__open_strerror(evsel, msg, size);
	if (ret)
		return ret;

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}

struct perf_env *evsel__env(struct evsel *evsel)
{
	if (evsel && evsel->evlist && evsel->evlist->env)
		return evsel->evlist->env;
	return &perf_env;
}

static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
	int cpu_map_idx, thread;

	for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
		     thread++) {
			int fd = FD(evsel, cpu_map_idx, thread);

			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
						   cpu_map_idx, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_cpu_map *cpus = evsel->core.cpus;
	struct perf_thread_map *threads = evsel->core.threads;

	if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}

void evsel__zero_per_pkg(struct evsel *evsel)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (evsel->per_pkg_mask) {
		hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
			zfree(&cur->pkey);

		hashmap__clear(evsel->per_pkg_mask);
	}
}

/**
 * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note,
 *                    this will be false on hybrid systems for hardware and
 *                    legacy cache events.
 */
bool evsel__is_hybrid(const struct evsel *evsel)
{
	if (perf_pmus__num_core_pmus() == 1)
		return false;

	return evsel->core.is_pmu_core;
}

struct evsel *evsel__leader(const struct evsel *evsel)
{
	return container_of(evsel->core.leader, struct evsel, core);
}

bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
{
	return evsel->core.leader == &leader->core;
}

bool evsel__is_leader(struct evsel *evsel)
{
	return evsel__has_leader(evsel, evsel);
}

void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
{
	evsel->core.leader = &leader->core;
}

int evsel__source_count(const struct evsel *evsel)
{
	struct evsel *pos;
	int count = 0;

	evlist__for_each_entry(evsel->evlist, pos) {
		if (pos->metric_leader == evsel)
			count++;
	}
	return count;
}

bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
{
	return false;
}

/*
 * Remove an event from a given group (leader). Some events, e.g. perf
 * metrics Topdown events, must always be grouped; those are left in place.
 */
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
{
	if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
		evsel__set_leader(evsel, evsel);
		evsel->core.nr_members = 0;
		leader->core.nr_members--;
	}
}
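
/*
 * Illustrative sketch of the leader helpers above, assuming two evsels that
 * were parsed as one group, e.g. "{cycles,instructions}":
 *
 *	struct evsel *leader = evlist__first(evlist);
 *	struct evsel *member = evsel__next(leader);
 *
 *	evsel__is_leader(leader);		// true
 *	evsel__has_leader(member, leader);	// true
 *	evsel__remove_from_group(member, leader);
 *	evsel__is_leader(member);		// now true, unless the arch
 *						// requires it to stay grouped
 */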