// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "event.h"
#include "evsel.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct evsel *evsel),
			      void (*fini)(struct evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr);
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_obj	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * The evsel->unit points to static alias->unit
		 * so it's ok to
		 * use static string in here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}

static bool perf_event_can_profile_kernel(void)
{
	return geteuid() == 0 || perf_event_paranoid() == -1;
}

struct evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;

	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * to kick in when we return and before perf_evsel__open() is called.
	 */
new_event:
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
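		/*
		 * Like the explicit modifiers above, precise_ip makes
		 * exclude_guest the assumed default, so H/G below are
		 * only printed when the attr differs from that default.
		 */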
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",	   },
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",  },
	{ "LLC",	"L2",						   },
	{ "dTLB",	"d-tlb",	"Data-TLB",			   },
	{ "iTLB",	"i-tlb",	"Instruction-TLB",		   },
	{ "branch",	"branches",	"bpu",		"btb",	   "bpc",  },
	{ "node",							   },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",				   },
	{ "store",	"stores",	"write",			   },
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",	   },
	{ "misses",	"miss",						   },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

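/* Is this (cache, op) combination supported, per the table above? */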
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__tool_name(char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "duration_time");
	return ret;
}

const char *perf_evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel->tool_event)
			perf_evsel__tool_name(bf, sizeof(bf));
		else
			perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

const char *perf_evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __perf_evsel__config_callchain(struct evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= PERF_REGS_MASK;
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If user explicitly set
			 * inherit using config terms, override global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case PERF_EVSEL__CONFIG_TERM_PERCORE:
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}

static bool is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *    Group members are ruled by group leaders. They need to
 *    be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *    Independent events and group leaders are initially disabled
 *    and get enabled by exec. Group members are ruled by group
 *    leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *    When attaching events to an already running traced program
 *    we enable/disable events specifically, as there's no
 *    initial traced exec call.
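 *
 *    For example, 'perf record -e '{cycles,instructions}' -- workload'
 *    creates the leader (cycles) disabled with enable_on_exec set and
 *    the member (instructions) enabled, as per 1) and 2) above.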
 */
void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
		attr->write_backward = 0;

		/*
		 * We don't get a sample for slave events, we make them
		 * when delivering the group leader sample. Set the slave
		 * event to follow the master sample_type to ease up
		 * reporting.
		 */
		attr->sample_type = leader->core.attr.sample_type;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the
	 * page fault handler and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task      = track;
	attr->mmap      = track;
	attr->mmap2     = track && !perf_missing_features.mmap2;
	attr->comm      = track;
	attr->ksymbol   = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event &&
		!perf_missing_features.bpf_event;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; they override any
	 * global configuration.
	 */
	apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			perf_evsel__set_sample_bit(evsel, PERIOD);
		else
			perf_evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will error out with -EOPNOTSUPP
	 * if the BRANCH_STACK bit is set.
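	 * Clearing the bit for the dummy event below avoids that error.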
	 */
	if (opts->initial_delay && is_dummy_event(evsel))
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}

int perf_evsel__set_filter(struct evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}

int evsel__enable(struct evsel *evsel)
{
	int err = perf_evsel__enable(&evsel->core);

	if (!err)
		evsel->disabled = false;

	return err;
}

int evsel__disable(struct evsel *evsel)
{
	int err = perf_evsel__disable(&evsel->core);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may
	 * already have a few more events queued up before the kernel got
	 * the stop request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_id(struct evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}

static void perf_evsel__free_config_terms(struct evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del_init(&term->list);
		free(term);
	}
}

void perf_evsel__exit(struct evsel *evsel)
{
	assert(list_empty(&evsel->core.node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_counts(evsel);
	perf_evsel__free_fd(&evsel->core);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	perf_cpu_map__put(evsel->core.cpus);
	perf_cpu_map__put(evsel->core.own_cpus);
	perf_thread_map__put(evsel->core.threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void evsel__delete(struct evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

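/*
 * Report the delta against the previously read raw counts and remember
 * the current raw value, so the next read yields the next delta.
 */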
void perf_evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}

static int
perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

	return perf_evsel__read(&evsel->core, cpu, thread, count);
}

static void
perf_evsel__set_count(struct evsel *counter, int cpu, int thread,
		      u64 val, u64 ena, u64 run)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu, thread);

	count->val = val;
	count->ena = ena;
	count->run = run;

	perf_counts__set_loaded(counter->counts, cpu, thread, true);
}

static int
perf_evsel__process_group_data(struct evsel *leader,
			       int cpu, int thread, u64 *data)
{
	u64 read_format = leader->core.attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, i;

	nr = *data++;

	if (nr != (u64) leader->core.nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (struct sample_read_value *) data;

	perf_evsel__set_count(leader, cpu, thread,
			      v[0].value, ena, run);

	for (i = 1; i < nr; i++) {
		struct evsel *counter;

		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
		if (!counter)
			return -EINVAL;

		perf_evsel__set_count(counter, cpu, thread,
				      v[i].value, ena, run);
	}

	return 0;
}

static int
perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->core.attr.read_format;
	int size = perf_evsel__read_size(&leader->core);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu, thread), data, size) <= 0)
		return -errno;

	return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
{
	u64 read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu, thread);
	else
		return perf_evsel__read_one(evsel,
					    cpu, thread);
}

int __perf_evsel__read_on_cpu(struct evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

static int get_group_fd(struct evsel *evsel, int cpu, int thread)
{
	struct evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->core.fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT), bit_name(PHYS_ADDR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);
	PRINT_ATTRf(namespaces, p_unsigned);
	PRINT_ATTRf(ksymbol, p_unsigned);
	PRINT_ATTRf(bpf_event, p_unsigned);
	PRINT_ATTRf(aux_output, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

static void perf_evsel__remove_fd(struct evsel *pos,
				  int nr_cpus, int nr_threads,
				  int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct evsel *evsel,
		      int nr_cpus, int cpu_idx,
		      int nr_threads, int thread_idx)
{
	struct evsel *pos;

	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
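		/*
		 * Evsels before the failing one had fds opened on all
		 * nr_cpus; the failing evsel itself only up to cpu_idx.
		 */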
		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since fds for the next evsel have not been created,
		 * there is no need to iterate over the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}

static bool ignore_missing_thread(struct evsel *evsel,
				  int nr_cpus, int cpu,
				  struct perf_thread_map *threads,
				  int thread, int err)
{
	pid_t ignore_pid = thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->system_wide)
		return false;

	/* The -ESRCH is the perf event syscall errno for pids not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We should remove the fd for the missing thread first,
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}

static void display_attr(struct perf_event_attr *attr)
{
	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
}

static int perf_event_open(struct evsel *evsel,
			   pid_t pid, int cpu, int group_fd,
			   unsigned long flags)
{
	int precise_ip = evsel->core.attr.precise_ip;
	int fd;

	while (1) {
		pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
			  pid, cpu, group_fd, flags);

		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
		if (fd >= 0)
			break;

		/* Do not try less precise if not requested. */
		if (!evsel->precise_max)
			break;

		/*
		 * We tried all the precise_ip values, and it's
		 * still failing, so leave it to standard fallback.
1719 */ 1720 if (!evsel->core.attr.precise_ip) { 1721 evsel->core.attr.precise_ip = precise_ip; 1722 break; 1723 } 1724 1725 pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP); 1726 evsel->core.attr.precise_ip--; 1727 pr_debug2("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); 1728 display_attr(&evsel->core.attr); 1729 } 1730 1731 return fd; 1732 } 1733 1734 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, 1735 struct perf_thread_map *threads) 1736 { 1737 int cpu, thread, nthreads; 1738 unsigned long flags = PERF_FLAG_FD_CLOEXEC; 1739 int pid = -1, err; 1740 enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; 1741 1742 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || 1743 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) 1744 return -EINVAL; 1745 1746 if (cpus == NULL) { 1747 static struct perf_cpu_map *empty_cpu_map; 1748 1749 if (empty_cpu_map == NULL) { 1750 empty_cpu_map = perf_cpu_map__dummy_new(); 1751 if (empty_cpu_map == NULL) 1752 return -ENOMEM; 1753 } 1754 1755 cpus = empty_cpu_map; 1756 } 1757 1758 if (threads == NULL) { 1759 static struct perf_thread_map *empty_thread_map; 1760 1761 if (empty_thread_map == NULL) { 1762 empty_thread_map = thread_map__new_by_tid(-1); 1763 if (empty_thread_map == NULL) 1764 return -ENOMEM; 1765 } 1766 1767 threads = empty_thread_map; 1768 } 1769 1770 if (evsel->system_wide) 1771 nthreads = 1; 1772 else 1773 nthreads = threads->nr; 1774 1775 if (evsel->core.fd == NULL && 1776 perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) 1777 return -ENOMEM; 1778 1779 if (evsel->cgrp) { 1780 flags |= PERF_FLAG_PID_CGROUP; 1781 pid = evsel->cgrp->fd; 1782 } 1783 1784 fallback_missing_features: 1785 if (perf_missing_features.clockid_wrong) 1786 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ 1787 if (perf_missing_features.clockid) { 1788 evsel->core.attr.use_clockid = 0; 1789 evsel->core.attr.clockid = 0; 1790 } 1791 if (perf_missing_features.cloexec) 1792 flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; 1793 if (perf_missing_features.mmap2) 1794 evsel->core.attr.mmap2 = 0; 1795 if (perf_missing_features.exclude_guest) 1796 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; 1797 if (perf_missing_features.lbr_flags) 1798 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1799 PERF_SAMPLE_BRANCH_NO_CYCLES); 1800 if (perf_missing_features.group_read && evsel->core.attr.inherit) 1801 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); 1802 if (perf_missing_features.ksymbol) 1803 evsel->core.attr.ksymbol = 0; 1804 if (perf_missing_features.bpf_event) 1805 evsel->core.attr.bpf_event = 0; 1806 retry_sample_id: 1807 if (perf_missing_features.sample_id_all) 1808 evsel->core.attr.sample_id_all = 0; 1809 1810 display_attr(&evsel->core.attr); 1811 1812 for (cpu = 0; cpu < cpus->nr; cpu++) { 1813 1814 for (thread = 0; thread < nthreads; thread++) { 1815 int fd, group_fd; 1816 1817 if (!evsel->cgrp && !evsel->system_wide) 1818 pid = thread_map__pid(threads, thread); 1819 1820 group_fd = get_group_fd(evsel, cpu, thread); 1821 retry_open: 1822 test_attr__ready(); 1823 1824 fd = perf_event_open(evsel, pid, cpus->map[cpu], 1825 group_fd, flags); 1826 1827 FD(evsel, cpu, thread) = fd; 1828 1829 if (fd < 0) { 1830 err = -errno; 1831 1832 if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { 1833 /* 1834 * We just removed 1 thread, so take a step 1835 * back on thread 
					 * index and lower the upper nthreads limit.
					 */
					nthreads--;
					thread--;

					/* ... and pretend like nothing has happened. */
					err = 0;
					continue;
				}

				pr_debug2("\nsys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			pr_debug2(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
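	 * The else-if chain below therefore checks the newest attribute
	 * first and switches features off one at a time before retrying.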
1916 */ 1917 if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { 1918 perf_missing_features.aux_output = true; 1919 pr_debug2("Kernel has no attr.aux_output support, bailing out\n"); 1920 goto out_close; 1921 } else if (!perf_missing_features.bpf_event && evsel->core.attr.bpf_event) { 1922 perf_missing_features.bpf_event = true; 1923 pr_debug2("switching off bpf_event\n"); 1924 goto fallback_missing_features; 1925 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { 1926 perf_missing_features.ksymbol = true; 1927 pr_debug2("switching off ksymbol\n"); 1928 goto fallback_missing_features; 1929 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { 1930 perf_missing_features.write_backward = true; 1931 pr_debug2("switching off write_backward\n"); 1932 goto out_close; 1933 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { 1934 perf_missing_features.clockid_wrong = true; 1935 pr_debug2("switching off clockid\n"); 1936 goto fallback_missing_features; 1937 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { 1938 perf_missing_features.clockid = true; 1939 pr_debug2("switching off use_clockid\n"); 1940 goto fallback_missing_features; 1941 } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { 1942 perf_missing_features.cloexec = true; 1943 pr_debug2("switching off cloexec flag\n"); 1944 goto fallback_missing_features; 1945 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { 1946 perf_missing_features.mmap2 = true; 1947 pr_debug2("switching off mmap2\n"); 1948 goto fallback_missing_features; 1949 } else if (!perf_missing_features.exclude_guest && 1950 (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) { 1951 perf_missing_features.exclude_guest = true; 1952 pr_debug2("switching off exclude_guest, exclude_host\n"); 1953 goto fallback_missing_features; 1954 } else if (!perf_missing_features.sample_id_all) { 1955 perf_missing_features.sample_id_all = true; 1956 pr_debug2("switching off sample_id_all\n"); 1957 goto retry_sample_id; 1958 } else if (!perf_missing_features.lbr_flags && 1959 (evsel->core.attr.branch_sample_type & 1960 (PERF_SAMPLE_BRANCH_NO_CYCLES | 1961 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1962 perf_missing_features.lbr_flags = true; 1963 pr_debug2("switching off branch sample type no (cycles/flags)\n"); 1964 goto fallback_missing_features; 1965 } else if (!perf_missing_features.group_read && 1966 evsel->core.attr.inherit && 1967 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && 1968 perf_evsel__is_group_leader(evsel)) { 1969 perf_missing_features.group_read = true; 1970 pr_debug2("switching off group read\n"); 1971 goto fallback_missing_features; 1972 } 1973 out_close: 1974 if (err) 1975 threads->err_thread = thread; 1976 1977 do { 1978 while (--thread >= 0) { 1979 close(FD(evsel, cpu, thread)); 1980 FD(evsel, cpu, thread) = -1; 1981 } 1982 thread = nthreads; 1983 } while (--cpu >= 0); 1984 return err; 1985 } 1986 1987 void evsel__close(struct evsel *evsel) 1988 { 1989 perf_evsel__close(&evsel->core); 1990 perf_evsel__free_id(evsel); 1991 } 1992 1993 int perf_evsel__open_per_cpu(struct evsel *evsel, 1994 struct perf_cpu_map *cpus) 1995 { 1996 return evsel__open(evsel, cpus, NULL); 1997 } 1998 1999 int perf_evsel__open_per_thread(struct evsel *evsel, 2000 struct perf_thread_map *threads) 2001 { 2002 return evsel__open(evsel, NULL, threads); 2003 } 2004 2005 static int perf_evsel__parse_id_sample(const 

static int perf_evsel__parse_id_sample(const struct evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
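
/*
 * Why perf_evsel__parse_id_sample() walks backwards: with
 * attr.sample_id_all set, the kernel appends the selected ID fields to
 * every non-sample record in this order (per the sample_id layout
 * documented in the linux/perf_event.h UAPI header):
 *
 *	{ u32 pid, tid;  }  if PERF_SAMPLE_TID
 *	{ u64 time;      }  if PERF_SAMPLE_TIME
 *	{ u64 id;        }  if PERF_SAMPLE_ID
 *	{ u64 stream_id; }  if PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }  if PERF_SAMPLE_CPU
 *	{ u64 id;        }  if PERF_SAMPLE_IDENTIFIER
 *
 * so the parser starts at the last u64 of the record and steps toward
 * the front, consuming fields in the reverse of that order.
 */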

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	return 0;
}

int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->core.attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->core.attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	data->misc = event->header.misc;
	data->id = -1ULL;
	data->data_src = PERF_MEM_DATA_SRC_NONE;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->core.attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->core.attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (evsel__has_callchain(evsel)) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;

		/*
		 * Undo swap of u64, then swap on individual u32s,
		 * get the size of the raw area and undo all of the
		 * swap. The pevent interface handles endianity by
		 * itself.
		 */
		if (swapped) {
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->core.attr.sample_regs_user;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->core.attr.sample_regs_intr;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	return 0;
}
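
/*
 * A minimal decoding sketch (illustrative only; the function name is
 * hypothetical).  The event is assumed to have been read off an
 * mmap'ed ring buffer, e.g. via perf_mmap__read_event() or similar.
 */
static void __maybe_unused example_parse_sample(struct evsel *evsel,
						union perf_event *event)
{
	struct perf_sample sample;

	if (perf_evsel__parse_sample(evsel, event, &sample)) {
		pr_debug("failed to parse sample\n");
		return;
	}

	pr_debug("pid %d tid %d ip %#" PRIx64 " period %" PRIu64 "\n",
		 (int)sample.pid, (int)sample.tid, sample.ip, sample.period);
}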

int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
				       union perf_event *event,
				       u64 *timestamp)
{
	u64 type = evsel->core.attr.sample_type;
	const u64 *array;

	if (!(type & PERF_SAMPLE_TIME))
		return -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		struct perf_sample data = {
			.time = -1ULL,
		};

		if (!evsel->core.attr.sample_id_all)
			return -1;
		if (perf_evsel__parse_id_sample(evsel, event, &data))
			return -1;

		*timestamp = data.time;
		return 0;
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER)
		array++;

	if (type & PERF_SAMPLE_IP)
		array++;

	if (type & PERF_SAMPLE_TID)
		array++;

	if (type & PERF_SAMPLE_TIME)
		*timestamp = *array;

	return 0;
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}
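
/*
 * A minimal round-trip sketch (illustrative only; the function name is
 * hypothetical): the size helper above pairs with
 * perf_event__synthesize_sample() below -- size the buffer first, then
 * serialize a struct perf_sample back into wire format.  Note that a
 * sample event must fit in the u16 header.size field.
 */
static int __maybe_unused example_resynthesize(struct evsel *evsel,
					       const struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;
	size_t size = perf_event__sample_event_size(sample, type, read_format);
	union perf_event *event = zalloc(size);
	int err;

	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = sample->misc;
	event->header.size = size;

	err = perf_event__synthesize_sample(event, type, read_format, sample);
	/* ... the event could now be written out, e.g. to a perf.data file ... */
	zfree(&event);
	return err;
}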

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}

struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}

u64 perf_evsel__intval(struct evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);

	if (!field)
		return 0;

	return format_field__intval(field, sample, evsel->needs_swap);
}
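
/*
 * A minimal tracepoint-field sketch (illustrative only; the function
 * name is hypothetical).  Assumes "evsel" is the sched:sched_switch
 * tracepoint, whose format provides "prev_comm" and "prev_pid" fields,
 * and that "sample" was filled by perf_evsel__parse_sample().
 */
static void __maybe_unused example_read_sched_switch(struct evsel *evsel,
						     struct perf_sample *sample)
{
	const char *prev_comm = perf_evsel__rawptr(evsel, sample, "prev_comm");
	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");

	pr_debug("%s (%" PRIu64 ") switched out\n",
		 prev_comm ? prev_comm : "?", prev_pid);
}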

bool perf_evsel__fallback(struct evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->core.attr.type == PERF_TYPE_HARDWARE &&
	    evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
			  "The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->core.attr.type = PERF_TYPE_SOFTWARE;
		evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    strchr(name, ':'))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
			  "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->core.attr.exclude_kernel = 1;

		return true;
	}

	return false;
}

static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		    !strcmp(".", d->d_name) ||
		    !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}

int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
				"No permission to enable %s event.\n\n",
				perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->core.attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->core.attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		if (perf_missing_features.aux_output)
			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}

struct perf_env *perf_evsel__env(struct evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return NULL;
}

static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
		     thread++) {
			int fd = FD(evsel, cpu, thread);

			if (perf_evlist__id_add_fd(evlist, evsel,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_cpu_map *cpus = evsel->core.cpus;
	struct perf_thread_map *threads = evsel->core.threads;

	if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}
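
/*
 * A minimal usage sketch (illustrative only; the function name is
 * hypothetical): after all events in an evlist have been opened,
 * record their kernel-assigned sample IDs so later PERF_RECORD_SAMPLEs
 * can be routed back to the right evsel.
 */
static int __maybe_unused example_store_all_ids(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__store_ids(evsel, evlist))
			return -1;
	}

	return 0;
}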