/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"

static struct {
        bool sample_id_all;
        bool exclude_guest;
        bool mmap2;
        bool cloexec;
        bool clockid;
        bool clockid_wrong;
        bool lbr_flags;
        bool write_backward;
} perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
        return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
        size_t  size;
        int     (*init)(struct perf_evsel *evsel);
        void    (*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
        .size = sizeof(struct perf_evsel),
        .init = perf_evsel__no_extra_init,
        .fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
                              int (*init)(struct perf_evsel *evsel),
                              void (*fini)(struct perf_evsel *evsel))
{
        if (object_size == 0)
                goto set_methods;

        if (perf_evsel__object.size > object_size)
                return -EINVAL;

        perf_evsel__object.size = object_size;

set_methods:
        if (init != NULL)
                perf_evsel__object.init = init;

        if (fini != NULL)
                perf_evsel__object.fini = fini;

        return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}
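
/*
 * Worked example (illustrative, not from the original source): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
 * three bits of PERF_SAMPLE_MASK are set, so the fixed-size part of
 * each sample is 3 * sizeof(u64) = 24 bytes.
 */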

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
        int idx = 0;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                return 0;

        if (!(sample_type & PERF_SAMPLE_ID))
                return -1;

        if (sample_type & PERF_SAMPLE_IP)
                idx += 1;

        if (sample_type & PERF_SAMPLE_TID)
                idx += 1;

        if (sample_type & PERF_SAMPLE_TIME)
                idx += 1;

        if (sample_type & PERF_SAMPLE_ADDR)
                idx += 1;

        return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
        int idx = 1;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                return 1;

        if (!(sample_type & PERF_SAMPLE_ID))
                return -1;

        if (sample_type & PERF_SAMPLE_CPU)
                idx += 1;

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                idx += 1;

        return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
        evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
        evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
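
/*
 * Worked example (illustrative, not from the original source): with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID, a sample lays out IP, TID, TIME, ID, so id_pos == 3.
 * Counting backwards over the sample_id trailer of a non-sample event
 * (no CPU or STREAM_ID here), the id is the last u64, so is_pos == 1.
 */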

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit)
{
        if (!(evsel->attr.sample_type & bit)) {
                evsel->attr.sample_type |= bit;
                evsel->sample_size += sizeof(u64);
                perf_evsel__calc_id_pos(evsel);
        }
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit)
{
        if (evsel->attr.sample_type & bit) {
                evsel->attr.sample_type &= ~bit;
                evsel->sample_size -= sizeof(u64);
                perf_evsel__calc_id_pos(evsel);
        }
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
                               bool can_sample_identifier)
{
        if (can_sample_identifier) {
                perf_evsel__reset_sample_bit(evsel, ID);
                perf_evsel__set_sample_bit(evsel, IDENTIFIER);
        } else {
                perf_evsel__set_sample_bit(evsel, ID);
        }
        evsel->attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

        return evsel->name &&
               !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx         = idx;
        evsel->tracking    = !idx;
        evsel->attr        = *attr;
        evsel->leader      = evsel;
        evsel->unit        = "";
        evsel->scale       = 1.0;
        evsel->evlist      = NULL;
        evsel->bpf_fd      = -1;
        INIT_LIST_HEAD(&evsel->node);
        INIT_LIST_HEAD(&evsel->config_terms);
        perf_evsel__object.init(evsel);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
        perf_evsel__calc_id_pos(evsel);
        evsel->cmdline_group_boundary = false;
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        /* Guard against dereferencing a NULL evsel on allocation failure. */
        if (evsel && perf_evsel__is_bpf_output(evsel)) {
                evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                            PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
                evsel->attr.sample_period = 1;
        }

        return evsel;
}

struct perf_evsel *perf_evsel__new_cycles(void)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        perf_event_attr__set_max_precise_ip(&attr);

        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
                goto out;

        /* use asprintf() because free(evsel) assumes name is allocated */
        if (asprintf(&evsel->name, "cycles%.*s",
                     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
                goto error_free;
out:
        return evsel;
error_free:
        perf_evsel__delete(evsel);
        evsel = NULL;
        goto out;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
        int err = -ENOMEM;

        if (evsel == NULL) {
                goto out_err;
        } else {
                struct perf_event_attr attr = {
                        .type        = PERF_TYPE_TRACEPOINT,
                        .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                        PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
                };

                if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
                        goto out_free;

                evsel->tp_format = trace_event__tp_format(sys, name);
                if (IS_ERR(evsel->tp_format)) {
                        err = PTR_ERR(evsel->tp_format);
                        goto out_free;
                }

                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
                attr.sample_period = 1;
                perf_evsel__init(evsel, &attr, idx);
        }

        return evsel;

out_free:
        zfree(&evsel->name);
        free(evsel);
out_err:
        return ERR_PTR(err);
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "cycles",
        "instructions",
        "cache-references",
        "cache-misses",
        "branches",
        "branch-misses",
        "bus-cycles",
        "stalled-cycles-frontend",
        "stalled-cycles-backend",
        "ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];

        return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
        int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
        bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                                    \
                if (!attr->exclude_##context) {                         \
                        if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)

        if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
                MOD_PRINT(kernel, 'k');
                MOD_PRINT(user, 'u');
                MOD_PRINT(hv, 'h');
                exclude_guest_default = true;
        }

        if (attr->precise_ip) {
                if (!colon)
                        colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }

        if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
                MOD_PRINT(host, 'H');
                MOD_PRINT(guest, 'G');
        }
#undef MOD_PRINT
        if (colon)
                bf[colon - 1] = ':';
        return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
        "page-faults",
        "context-switches",
        "cpu-migrations",
        "minor-faults",
        "major-faults",
        "alignment-faults",
        "emulation-faults",
        "dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
        if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
                return perf_evsel__sw_names[config];
        return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
        int r;

        r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

        if (type & HW_BREAKPOINT_R)
                r += scnprintf(bf + r, size - r, "r");

        if (type & HW_BREAKPOINT_W)
                r += scnprintf(bf + r, size - r, "w");

        if (type & HW_BREAKPOINT_X)
                r += scnprintf(bf + r, size - r, "x");

        return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        struct perf_event_attr *attr = &evsel->attr;
        int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
        { "L1-dcache", "l1-d",      "l1d",              "L1-data",          },
        { "L1-icache", "l1-i",      "l1i",              "L1-instruction",   },
        { "LLC",       "L2",                                                },
        { "dTLB",      "d-tlb",     "Data-TLB",                             },
        { "iTLB",      "i-tlb",     "Instruction-TLB",                      },
        { "branch",    "branches",  "bpu",              "btb",      "bpc",  },
        { "node",                                                           },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
        { "load",     "loads",      "read",                                 },
        { "store",    "stores",     "write",                                },
        { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
        { "refs",   "Reference", "ops", "access", },
        { "misses", "miss",                       },
};

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
        [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
        [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(ITLB)] = (CACHE_READ),
        [C(BPU)]  = (CACHE_READ),
        [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
        if (perf_evsel__hw_cache_stat[type] & COP(op))
                return true;    /* valid */
        else
                return false;   /* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)
{
        if (result) {
                return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                                 perf_evsel__hw_cache_op[op][0],
                                 perf_evsel__hw_cache_result[result][0]);
        }

        return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                         perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
        u8 op, result, type = (config >> 0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";

        if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;

        op = (config >> 8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
        if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;

        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
        if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;

        err = "invalid-cache";
        if (!perf_evsel__is_cache_op_valid(type, op))
                goto out_err;

        return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
        return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
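
/*
 * For reference (as documented for PERF_TYPE_HW_CACHE in
 * linux/perf_event.h): attr.config packs the three indices decoded above,
 * config = type | (op << 8) | (result << 16).  For example,
 * "L1-dcache-load-misses" is PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */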

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
        char bf[128];

        if (evsel->name)
                return evsel->name;

        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
                perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HARDWARE:
                perf_evsel__hw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HW_CACHE:
                perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_SOFTWARE:
                perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_TRACEPOINT:
                scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
                break;

        case PERF_TYPE_BREAKPOINT:
                perf_evsel__bp_name(evsel, bf, sizeof(bf));
                break;

        default:
                scnprintf(bf, sizeof(bf), "unknown attr type: %d",
                          evsel->attr.type);
                break;
        }

        evsel->name = strdup(bf);

        return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
        return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
        int ret;
        struct perf_evsel *pos;
        const char *group_name = perf_evsel__group_name(evsel);

        ret = scnprintf(buf, size, "%s", group_name);

        ret += scnprintf(buf + ret, size - ret, " { %s",
                         perf_evsel__name(evsel));

        for_each_group_member(pos, evsel)
                ret += scnprintf(buf + ret, size - ret, ", %s",
                                 perf_evsel__name(pos));

        ret += scnprintf(buf + ret, size - ret, " }");

        return ret;
}

void perf_evsel__config_callchain(struct perf_evsel *evsel,
                                  struct record_opts *opts,
                                  struct callchain_param *param)
{
        bool function = perf_evsel__is_function_event(evsel);
        struct perf_event_attr *attr = &evsel->attr;

        perf_evsel__set_sample_bit(evsel, CALLCHAIN);

        attr->sample_max_stack = param->max_stack;

        if (param->record_mode == CALLCHAIN_LBR) {
                if (!opts->branch_stack) {
                        if (attr->exclude_user) {
                                pr_warning("LBR callstack option is only available "
                                           "to get user callchain information. "
                                           "Falling back to framepointers.\n");
                        } else {
                                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                                attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                                        PERF_SAMPLE_BRANCH_CALL_STACK |
                                                        PERF_SAMPLE_BRANCH_NO_CYCLES |
                                                        PERF_SAMPLE_BRANCH_NO_FLAGS;
                        }
                } else
                        pr_warning("Cannot use LBR callstack with branch stack. "
                                   "Falling back to framepointers.\n");
        }

        if (param->record_mode == CALLCHAIN_DWARF) {
                if (!function) {
                        perf_evsel__set_sample_bit(evsel, REGS_USER);
                        perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
                        attr->sample_stack_user = param->dump_size;
                        attr->exclude_callchain_user = 1;
                } else {
                        pr_info("Cannot use DWARF unwind for function trace event,"
                                " falling back to framepointers.\n");
                }
        }

        if (function) {
                pr_info("Disabling user space callchains for function trace event.\n");
                attr->exclude_callchain_user = 1;
        }
}

static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
                            struct callchain_param *param)
{
        struct perf_event_attr *attr = &evsel->attr;

        perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
        if (param->record_mode == CALLCHAIN_LBR) {
                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
                                              PERF_SAMPLE_BRANCH_CALL_STACK);
        }
        if (param->record_mode == CALLCHAIN_DWARF) {
                perf_evsel__reset_sample_bit(evsel, REGS_USER);
                perf_evsel__reset_sample_bit(evsel, STACK_USER);
        }
}

static void apply_config_terms(struct perf_evsel *evsel,
                               struct record_opts *opts)
{
        struct perf_evsel_config_term *term;
        struct list_head *config_terms = &evsel->config_terms;
        struct perf_event_attr *attr = &evsel->attr;
        struct callchain_param param;
        u32 dump_size = 0;
        int max_stack = 0;
        const char *callgraph_buf = NULL;

        /* callgraph default */
        param.record_mode = callchain_param.record_mode;

        list_for_each_entry(term, config_terms, list) {
                switch (term->type) {
                case PERF_EVSEL__CONFIG_TERM_PERIOD:
                        attr->sample_period = term->val.period;
                        attr->freq = 0;
                        break;
                case PERF_EVSEL__CONFIG_TERM_FREQ:
                        attr->sample_freq = term->val.freq;
                        attr->freq = 1;
                        break;
                case PERF_EVSEL__CONFIG_TERM_TIME:
                        if (term->val.time)
                                perf_evsel__set_sample_bit(evsel, TIME);
                        else
                                perf_evsel__reset_sample_bit(evsel, TIME);
                        break;
                case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
                        callgraph_buf = term->val.callgraph;
                        break;
                case PERF_EVSEL__CONFIG_TERM_STACK_USER:
                        dump_size = term->val.stack_user;
                        break;
                case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
                        max_stack = term->val.max_stack;
                        break;
                case PERF_EVSEL__CONFIG_TERM_INHERIT:
                        /*
                         * attr->inherit should have already been set by
                         * perf_evsel__config. If the user explicitly set
                         * inherit using config terms, override the global
                         * opt->no_inherit setting.
                         */
                        attr->inherit = term->val.inherit ? 1 : 0;
                        break;
                case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
                        attr->write_backward = term->val.overwrite ? 1 : 0;
                        break;
                default:
                        break;
                }
        }

        /* User explicitly set per-event callgraph, clear the old setting and reset. */
        if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
                if (max_stack) {
                        param.max_stack = max_stack;
                        if (callgraph_buf == NULL)
                                callgraph_buf = "fp";
                }

                /* parse callgraph parameters */
                if (callgraph_buf != NULL) {
                        if (!strcmp(callgraph_buf, "no")) {
                                param.enabled = false;
                                param.record_mode = CALLCHAIN_NONE;
                        } else {
                                param.enabled = true;
                                if (parse_callchain_record(callgraph_buf, &param)) {
                                        pr_err("per-event callgraph setting for %s failed. "
                                               "Apply callgraph global setting for it\n",
                                               evsel->name);
                                        return;
                                }
                        }
                }
                if (dump_size > 0) {
                        dump_size = round_up(dump_size, sizeof(u64));
                        param.dump_size = dump_size;
                }

                /* If global callgraph set, clear it */
                if (callchain_param.enabled)
                        perf_evsel__reset_callgraph(evsel, &callchain_param);

                /* set perf-event callgraph */
                if (param.enabled)
                        perf_evsel__config_callchain(evsel, opts, &param);
        }
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                        struct callchain_param *callchain)
{
        struct perf_evsel *leader = evsel->leader;
        struct perf_event_attr *attr = &evsel->attr;
        int track = evsel->tracking;
        bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

        attr->sample_id_all  = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit        = !opts->no_inherit;
        attr->write_backward = opts->overwrite ? 1 : 0;

        perf_evsel__set_sample_bit(evsel, IP);
        perf_evsel__set_sample_bit(evsel, TID);

        if (evsel->sample_read) {
                perf_evsel__set_sample_bit(evsel, READ);

                /*
                 * We need ID even in case of a single event, because
                 * PERF_SAMPLE_READ processes ID-specific data.
                 */
                perf_evsel__set_sample_id(evsel, false);

                /*
                 * Apply group format only if we belong to a group
                 * with more than one member.
                 */
                if (leader->nr_members > 1) {
                        attr->read_format |= PERF_FORMAT_GROUP;
                        attr->inherit = 0;
                }
        }

        /*
         * We default some events to have a default interval. But keep
         * it a weak assumption overridable by the user.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq        = 1;
                        attr->sample_freq = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        /*
         * Disable sampling for all group members other
         * than the leader in case the leader 'leads' the sampling.
         */
        if ((leader != evsel) && leader->sample_read) {
                attr->sample_freq   = 0;
                attr->sample_period = 0;
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                perf_evsel__set_sample_bit(evsel, ADDR);
                attr->mmap_data = track;
        }

        /*
         * We don't allow user space callchains for function trace
         * events, due to issues with page faults while tracing the page
         * fault handler and the overall trickiness involved.
         */
        if (perf_evsel__is_function_event(evsel))
                evsel->attr.exclude_callchain_user = 1;

        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                perf_evsel__config_callchain(evsel, opts, callchain);

        if (opts->sample_intr_regs) {
                attr->sample_regs_intr = opts->sample_intr_regs;
                perf_evsel__set_sample_bit(evsel, REGS_INTR);
        }

        if (target__has_cpu(&opts->target) || opts->sample_cpu)
                perf_evsel__set_sample_bit(evsel, CPU);

        if (opts->period)
                perf_evsel__set_sample_bit(evsel, PERIOD);

        /*
         * When the user explicitly disabled time don't force it here.
         */
        if (opts->sample_time &&
            (!perf_missing_features.sample_id_all &&
            (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
             opts->sample_time_set)))
                perf_evsel__set_sample_bit(evsel, TIME);

        if (opts->raw_samples && !evsel->no_aux_samples) {
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__set_sample_bit(evsel, RAW);
                perf_evsel__set_sample_bit(evsel, CPU);
        }

        if (opts->sample_address)
                perf_evsel__set_sample_bit(evsel, DATA_SRC);

        if (opts->no_buffering) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }
        if (opts->branch_stack && !evsel->no_aux_samples) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }

        if (opts->sample_weight)
                perf_evsel__set_sample_bit(evsel, WEIGHT);

        attr->task  = track;
        attr->mmap  = track;
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;

        if (opts->record_switch_events)
                attr->context_switch = track;

        if (opts->sample_transaction)
                perf_evsel__set_sample_bit(evsel, TRANSACTION);

        if (opts->running_time) {
                evsel->attr.read_format |=
                        PERF_FORMAT_TOTAL_TIME_ENABLED |
                        PERF_FORMAT_TOTAL_TIME_RUNNING;
        }

        /*
         * XXX see the function comment above
         *
         * Disabling only independent events or group leaders,
         * keeping group members enabled.
         */
        if (perf_evsel__is_group_leader(evsel))
                attr->disabled = 1;

        /*
         * Setting enable_on_exec for independent events and
         * group leaders for traced programs executed by perf.
         */
        if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
            !opts->initial_delay)
                attr->enable_on_exec = 1;

        if (evsel->immediate) {
                attr->disabled = 0;
                attr->enable_on_exec = 0;
        }

        clockid = opts->clockid;
        if (opts->use_clockid) {
                attr->use_clockid = 1;
                attr->clockid = opts->clockid;
        }

        if (evsel->precise_max)
                perf_event_attr__set_max_precise_ip(attr);

        if (opts->all_user) {
                attr->exclude_kernel = 1;
                attr->exclude_user   = 0;
        }

        if (opts->all_kernel) {
                attr->exclude_kernel = 0;
                attr->exclude_user   = 1;
        }

        /*
         * Apply event-specific term settings; these override
         * any global configuration.
         */
        apply_config_terms(evsel, opts);
}
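
/*
 * Illustration of the period/frequency defaults above (assuming the usual
 * record_opts plumbing in builtin-record): "perf record -F 1000" ends up
 * here with opts->freq = 1000, yielding attr.freq = 1 and
 * attr.sample_freq = 1000, while "perf record -c 100000" clears opts->freq
 * so attr.sample_period = 100000 is used instead.
 */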

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->system_wide)
                nthreads = 1;

        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                int cpu, thread;
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
                                 int ioc, void *arg)
{
        int cpu, thread;

        if (evsel->system_wide)
                nthreads = 1;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        int fd = FD(evsel, cpu, thread),
                            err = ioctl(fd, ioc, arg);

                        if (err)
                                return err;
                }
        }

        return 0;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                             const char *filter)
{
        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_SET_FILTER,
                                     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
        char *new_filter = strdup(filter);

        if (new_filter != NULL) {
                free(evsel->filter);
                evsel->filter = new_filter;
                return 0;
        }

        return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
                                     const char *fmt, const char *filter)
{
        char *new_filter;

        if (evsel->filter == NULL)
                return perf_evsel__set_filter(evsel, filter);

        if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
                free(evsel->filter);
                evsel->filter = new_filter;
                return 0;
        }

        return -1;
}

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
{
        return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
{
        return perf_evsel__append_filter(evsel, "%s,%s", filter);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
        int nthreads = thread_map__nr(evsel->threads);
        int ncpus = cpu_map__nr(evsel->cpus);

        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_ENABLE,
                                     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
        int nthreads = thread_map__nr(evsel->threads);
        int ncpus = cpu_map__nr(evsel->cpus);

        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_DISABLE,
                                     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (ncpus == 0 || nthreads == 0)
                return 0;

        if (evsel->system_wide)
                nthreads = 1;

        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
        struct perf_evsel_config_term *term, *h;

        list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
                list_del(&term->list);
                free(term);
        }
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        if (evsel->system_wide)
                nthreads = 1;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        assert(evsel->evlist == NULL);
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
        perf_evsel__free_config_terms(evsel);
        close_cgroup(evsel->cgrp);
        cpu_map__put(evsel->cpus);
        cpu_map__put(evsel->own_cpus);
        thread_map__put(evsel->threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
        perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count)
{
        struct perf_counts_values tmp;

        if (!evsel->prev_raw_counts)
                return;

        if (cpu == -1) {
                tmp = evsel->prev_raw_counts->aggr;
                evsel->prev_raw_counts->aggr = *count;
        } else {
                tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
                *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
        }

        count->val = count->val - tmp.val;
        count->ena = count->ena - tmp.ena;
        count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
                               bool scale, s8 *pscaled)
{
        s8 scaled = 0;

        if (scale) {
                if (count->run == 0) {
                        scaled = -1;
                        count->val = 0;
                } else if (count->run < count->ena) {
                        scaled = 1;
                        count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
                }
        } else
                count->ena = count->run = 0;

        if (pscaled)
                *pscaled = scaled;
}
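
/*
 * Worked example (illustrative, not from the original source): a
 * multiplexed counter with val = 1000, ena = 200, run = 100 was scheduled
 * in for half of its enabled time, so scaling extrapolates
 * val = 1000 * 200 / 100 + 0.5 = 2000 and *pscaled is set to 1.
 */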

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)
{
        memset(count, 0, sizeof(*count));

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
                return -errno;

        return 0;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        perf_evsel__compute_deltas(evsel, cpu, thread, &count);
        perf_counts_values__scale(&count, scale, NULL);
        *perf_counts(evsel->counts, cpu, thread) = count;
        return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
        struct perf_evsel *leader = evsel->leader;
        int fd;

        if (perf_evsel__is_group_leader(evsel))
                return -1;

        /*
         * Leader must be already processed/open,
         * if not it's a bug.
         */
        BUG_ON(!leader->fd);

        fd = FD(leader, cpu, thread);
        BUG_ON(fd == -1);

        return fd;
}
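
/*
 * Sketch of the resulting syscalls (standard perf_event_open() group
 * semantics): the leader is opened with group_fd == -1, then every member
 * passes the leader's fd returned by get_group_fd():
 *
 *      leader_fd = sys_perf_event_open(&leader->attr, pid, cpu, -1, flags);
 *      member_fd = sys_perf_event_open(&member->attr, pid, cpu, leader_fd, flags);
 */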

struct bit_names {
        int bit;
        const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
        bool first_bit = true;
        int i = 0;

        do {
                if (value & bits[i].bit) {
                        int n = scnprintf(buf, size, "%s%s",
                                          first_bit ? "" : "|", bits[i].name);

                        /* track the remaining space so later writes can't overrun */
                        buf += n;
                        size -= n;
                        first_bit = false;
                }
        } while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
        struct bit_names bits[] = {
                bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
                bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
                bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
                bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
                bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
                bit_name(WEIGHT),
                { .name = NULL, }
        };
#undef bit_name
        __p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
        struct bit_names bits[] = {
                bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
                bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
                bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
                bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
                bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
                { .name = NULL, }
        };
#undef bit_name
        __p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
        struct bit_names bits[] = {
                bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
                bit_name(ID), bit_name(GROUP),
                { .name = NULL, }
        };
#undef bit_name
        __p_bits(buf, size, value, bits);
}

#define BUF_SIZE                1024

#define p_hex(val)              snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)         snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)           snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)      __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)      __p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)                         \
do {                                                    \
        if (attr->_f) {                                 \
                _p(attr->_f);                           \
                ret += attr__fprintf(fp, _n, buf, priv);\
        }                                               \
} while (0)

#define PRINT_ATTRf(_f, _p)     PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
                             attr__fprintf_f attr__fprintf, void *priv)
{
        char buf[BUF_SIZE];
        int ret = 0;

        PRINT_ATTRf(type, p_unsigned);
        PRINT_ATTRf(size, p_unsigned);
        PRINT_ATTRf(config, p_hex);
        PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
        PRINT_ATTRf(sample_type, p_sample_type);
        PRINT_ATTRf(read_format, p_read_format);

        PRINT_ATTRf(disabled, p_unsigned);
        PRINT_ATTRf(inherit, p_unsigned);
        PRINT_ATTRf(pinned, p_unsigned);
        PRINT_ATTRf(exclusive, p_unsigned);
        PRINT_ATTRf(exclude_user, p_unsigned);
        PRINT_ATTRf(exclude_kernel, p_unsigned);
        PRINT_ATTRf(exclude_hv, p_unsigned);
        PRINT_ATTRf(exclude_idle, p_unsigned);
        PRINT_ATTRf(mmap, p_unsigned);
        PRINT_ATTRf(comm, p_unsigned);
        PRINT_ATTRf(freq, p_unsigned);
        PRINT_ATTRf(inherit_stat, p_unsigned);
        PRINT_ATTRf(enable_on_exec, p_unsigned);
        PRINT_ATTRf(task, p_unsigned);
        PRINT_ATTRf(watermark, p_unsigned);
        PRINT_ATTRf(precise_ip, p_unsigned);
        PRINT_ATTRf(mmap_data, p_unsigned);
        PRINT_ATTRf(sample_id_all, p_unsigned);
        PRINT_ATTRf(exclude_host, p_unsigned);
        PRINT_ATTRf(exclude_guest, p_unsigned);
        PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
        PRINT_ATTRf(exclude_callchain_user, p_unsigned);
        PRINT_ATTRf(mmap2, p_unsigned);
        PRINT_ATTRf(comm_exec, p_unsigned);
        PRINT_ATTRf(use_clockid, p_unsigned);
        PRINT_ATTRf(context_switch, p_unsigned);
        PRINT_ATTRf(write_backward, p_unsigned);

        PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
        PRINT_ATTRf(bp_type, p_unsigned);
        PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
        PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
        PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
        PRINT_ATTRf(sample_regs_user, p_hex);
        PRINT_ATTRf(sample_stack_user, p_unsigned);
        PRINT_ATTRf(clockid, p_signed);
        PRINT_ATTRf(sample_regs_intr, p_hex);
        PRINT_ATTRf(aux_watermark, p_unsigned);
        PRINT_ATTRf(sample_max_stack, p_unsigned);

        return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
                                void *priv __attribute__((unused)))
{
        return fprintf(fp, " %-32s %s\n", name, val);
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
{
        int cpu, thread, nthreads;
        unsigned long flags = PERF_FLAG_FD_CLOEXEC;
        int pid = -1, err;
        enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

        if (perf_missing_features.write_backward && evsel->attr.write_backward)
                return -EINVAL;

        if (evsel->system_wide)
                nthreads = 1;
        else
                nthreads = threads->nr;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags |= PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

fallback_missing_features:
        if (perf_missing_features.clockid_wrong)
                evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
        if (perf_missing_features.clockid) {
                evsel->attr.use_clockid = 0;
                evsel->attr.clockid = 0;
        }
        if (perf_missing_features.cloexec)
                flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
        if (perf_missing_features.mmap2)
                evsel->attr.mmap2 = 0;
        if (perf_missing_features.exclude_guest)
                evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
        if (perf_missing_features.lbr_flags)
                evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                                                    PERF_SAMPLE_BRANCH_NO_CYCLES);
retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;

        if (verbose >= 2) {
                fprintf(stderr, "%.60s\n", graph_dotted_line);
                fprintf(stderr, "perf_event_attr:\n");
                perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
                fprintf(stderr, "%.60s\n", graph_dotted_line);
        }

        for (cpu = 0; cpu < cpus->nr; cpu++) {

                for (thread = 0; thread < nthreads; thread++) {
                        int group_fd;

                        if (!evsel->cgrp && !evsel->system_wide)
                                pid = thread_map__pid(threads, thread);

                        group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
                        pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
                                  pid, cpus->map[cpu], group_fd, flags);

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                pr_debug2("sys_perf_event_open failed, error %d\n",
                                          err);
                                goto try_fallback;
                        }

                        if (evsel->bpf_fd >= 0) {
                                int evt_fd = FD(evsel, cpu, thread);
                                int bpf_fd = evsel->bpf_fd;

                                err = ioctl(evt_fd,
                                            PERF_EVENT_IOC_SET_BPF,
                                            bpf_fd);
                                if (err && errno != EEXIST) {
                                        pr_err("failed to attach bpf fd %d: %s\n",
                                               bpf_fd, strerror(errno));
                                        err = -EINVAL;
                                        goto out_close;
                                }
                        }

                        set_rlimit = NO_CHANGE;

                        /*
                         * If we succeeded but had to kill clockid, fail and
                         * have perf_evsel__open_strerror() print us a nice
                         * error.
                         */
                        if (perf_missing_features.clockid ||
                            perf_missing_features.clockid_wrong) {
                                err = -EINVAL;
                                goto out_close;
                        }
                }
        }

        return 0;

try_fallback:
        /*
         * perf stat needs between 5 and 22 fds per CPU. When we run out
         * of them try to increase the limits.
         */
        if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
                struct rlimit l;
                int old_errno = errno;

                if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
                        if (set_rlimit == NO_CHANGE)
                                l.rlim_cur = l.rlim_max;
                        else {
                                l.rlim_cur = l.rlim_max + 1000;
                                l.rlim_max = l.rlim_cur;
                        }
                        if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
                                set_rlimit++;
                                errno = old_errno;
                                goto retry_open;
                        }
                }
                errno = old_errno;
        }

        if (err != -EINVAL || cpu > 0 || thread > 0)
                goto out_close;

        /*
         * Must probe features in the order they were added to the
         * perf_event_attr interface.
         */
        if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
                perf_missing_features.write_backward = true;
                goto out_close;
        } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
                perf_missing_features.clockid_wrong = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
                perf_missing_features.clockid = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
                perf_missing_features.cloexec = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
                perf_missing_features.mmap2 = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.exclude_guest &&
                   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
                perf_missing_features.exclude_guest = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.sample_id_all) {
                perf_missing_features.sample_id_all = true;
                goto retry_sample_id;
        } else if (!perf_missing_features.lbr_flags &&
                   (evsel->attr.branch_sample_type &
                    (PERF_SAMPLE_BRANCH_NO_CYCLES |
                     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
                perf_missing_features.lbr_flags = true;
                goto fallback_missing_features;
        }
out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = nthreads;
        } while (--cpu >= 0);
        return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
}

static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        const u64 *array = event->sample.array;
        bool swapped = evsel->needs_swap;
        union u64_swap u;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_IDENTIFIER) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                sample->cpu = u.val32[0];
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                sample->pid = u.val32[0];
                sample->tid = u.val32[1];
                array--;
        }

        return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
                            u64 size)
{
        return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)                          \
        do {                                                            \
                if (overflow(endp, (max_size), (offset), (size)))       \
                        return -EFAULT;                                 \
        } while (0)

#define OVERFLOW_CHECK_u64(offset) \
        OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
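
/*
 * How the checks above are used while walking a sample (illustrative;
 * "foo" is a hypothetical fixed-size field): "array" advances one u64 or
 * one variable-sized blob at a time, and every advance is preceded by a
 * bounds check against the event size:
 *
 *      OVERFLOW_CHECK_u64(array);              // room for one more u64?
 *      data->foo = *array++;
 *      OVERFLOW_CHECK(array, sz, max_size);    // room for sz more bytes?
 *      array = (void *)array + sz;             // skip a variable-sized payload
 */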

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *data)
{
        u64 type = evsel->attr.sample_type;
        bool swapped = evsel->needs_swap;
        const u64 *array;
        u16 max_size = event->header.size;
        const void *endp = (void *)event + max_size;
        u64 sz;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = evsel->attr.sample_period;
        data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!evsel->attr.sample_id_all)
                        return 0;
                return perf_evsel__parse_id_sample(evsel, event, data);
        }

        array = event->sample.array;

        /*
         * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
         * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
         * check the format does not go past the end of the event.
         */
        if (evsel->sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        data->id = -1ULL;
        if (type & PERF_SAMPLE_IDENTIFIER) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_IP) {
                data->ip = *array;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {

                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                u64 read_format = evsel->attr.read_format;

                OVERFLOW_CHECK_u64(array);
                if (read_format & PERF_FORMAT_GROUP)
                        data->read.group.nr = *array;
                else
                        data->read.one.value = *array;

                array++;

                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                        OVERFLOW_CHECK_u64(array);
                        data->read.time_enabled = *array;
                        array++;
                }

                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
                        OVERFLOW_CHECK_u64(array);
                        data->read.time_running = *array;
                        array++;
                }

                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
                        const u64 max_group_nr = UINT64_MAX /
                                        sizeof(struct sample_read_value);

                        if (data->read.group.nr > max_group_nr)
                                return -EFAULT;
                        sz = data->read.group.nr *
                             sizeof(struct sample_read_value);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->read.group.values =
                                        (struct sample_read_value *)array;
                        array = (void *)array + sz;
                } else {
                        OVERFLOW_CHECK_u64(array);
                        data->read.one.id = *array;
                        array++;
                }
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

                OVERFLOW_CHECK_u64(array);
                data->callchain = (struct ip_callchain *)array++;
                if (data->callchain->nr > max_callchain_nr)
                        return -EFAULT;
                sz = data->callchain->nr * sizeof(u64);
                OVERFLOW_CHECK(array, sz, max_size);
                array = (void *)array + sz;
        }

        if (type & PERF_SAMPLE_RAW) {
                OVERFLOW_CHECK_u64(array);
                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }
                data->raw_size = u.val32[0];
                array = (void *)array + sizeof(u32);

                OVERFLOW_CHECK(array, data->raw_size, max_size);
                data->raw_data = (void *)array;
                array = (void *)array + data->raw_size;
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                const u64 max_branch_nr = UINT64_MAX /
                                          sizeof(struct branch_entry);

                OVERFLOW_CHECK_u64(array);
                data->branch_stack = (struct branch_stack *)array++;

                if (data->branch_stack->nr > max_branch_nr)
                        return -EFAULT;
                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                OVERFLOW_CHECK(array, sz, max_size);
                array = (void *)array + sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                OVERFLOW_CHECK_u64(array);
                data->user_regs.abi = *array;
                array++;

                if (data->user_regs.abi) {
                        u64 mask = evsel->attr.sample_regs_user;

                        sz = hweight_long(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_regs.mask = mask;
                        data->user_regs.regs = (u64 *)array;
                        array = (void *)array + sz;
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                OVERFLOW_CHECK_u64(array);
                sz = *array++;

                data->user_stack.offset = ((char *)(array - 1)
                                          - (char *) event);

                if (!sz) {
                        data->user_stack.size = 0;
                } else {
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_stack.data = (char *)array;
                        array = (void *)array + sz;
                        OVERFLOW_CHECK_u64(array);
                        data->user_stack.size = *array++;
                        if (WARN_ONCE(data->user_stack.size > sz,
                                      "user stack dump failure\n"))
                                return -EFAULT;
                }
        }

        if (type & PERF_SAMPLE_WEIGHT) {
                OVERFLOW_CHECK_u64(array);
                data->weight = *array;
                array++;
        }

        data->data_src = PERF_MEM_DATA_SRC_NONE;
        if (type & PERF_SAMPLE_DATA_SRC) {
                OVERFLOW_CHECK_u64(array);
                data->data_src = *array;
                array++;
        }

        data->transaction = 0;
        if (type & PERF_SAMPLE_TRANSACTION) {
                OVERFLOW_CHECK_u64(array);
                data->transaction = *array;
                array++;
        }

        data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
        if (type & PERF_SAMPLE_REGS_INTR) {
                OVERFLOW_CHECK_u64(array);
                data->intr_regs.abi = *array;
                array++;

                if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
                        u64 mask = evsel->attr.sample_regs_intr;

                        sz = hweight_long(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->intr_regs.mask = mask;
                        data->intr_regs.regs = (u64 *)array;
                        array = (void *)array + sz;
                }
        }

        return 0;
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format)
{
        size_t sz, result = sizeof(struct sample_event);

        if (type & PERF_SAMPLE_IDENTIFIER)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_IP)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TIME)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ADDR)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_STREAM_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CPU)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_PERIOD)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_READ) {
                result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                        result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                        result += sizeof(u64);
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
                        sz = sample->read.group.nr *
                             sizeof(struct sample_read_value);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                sz = (sample->callchain->nr + 1) * sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_RAW) {
                result += sizeof(u32);
                result += sample->raw_size;
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
                sz += sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                sz = sample->user_stack.size;
                result += sizeof(u64);
                if (sz) {
                        result += sz;
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_WEIGHT)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_DATA_SRC)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TRANSACTION)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        return result;
}
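
/*
 * Worked example (illustrative, not from the original source): for
 * type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME the size is
 * sizeof(struct sample_event) + 3 * sizeof(u64), which matches what
 * perf_event__synthesize_sample() below writes for the same type.
 */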
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}
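
/*
 * Sketch of by-hand tracepoint field access, illustrative only: the
 * perf_evsel__intval() wrapper below does this lookup plus decode in one
 * call.  The "prev_pid" field of a hypothetical sched:sched_switch evsel is
 * just an assumed example of an integer tracepoint field.
 */
#if 0
static u64 sketch__prev_pid(struct perf_evsel *sched_switch_evsel,
			    struct perf_sample *sample)
{
	struct format_field *field;

	field = perf_evsel__field(sched_switch_evsel, "prev_pid");
	if (field == NULL)
		return 0;

	return format_field__intval(field, sample,
				    sched_switch_evsel->needs_swap);
}
#endif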
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	if (!field)
		return 0;

	return format_field__intval(field, sample, evsel->needs_swap);
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;

		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
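
/*
 * Minimal sketch of assumed usage, mirroring the retry loops in the record
 * and stat code: on a failed open, let perf_evsel__fallback() rewrite the
 * attr (cycles -> cpu-clock, or append the :u modifier) and retry; the msg
 * buffer carries the notice shown to the user.  The helper name is made up.
 */
#if 0
static int sketch__open_with_fallback(struct perf_evsel *evsel,
				      struct cpu_map *cpus,
				      struct thread_map *threads)
{
	char msg[BUFSIZ];
	int err;

retry:
	err = perf_evsel__open(evsel, cpus, threads);
	if (err < 0 && perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
		pr_warning("%s\n", msg);
		goto retry;
	}

	return err;
}
#endif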
"system-wide " : "", 2418 perf_event_paranoid()); 2419 case ENOENT: 2420 return scnprintf(msg, size, "The %s event is not supported.", 2421 perf_evsel__name(evsel)); 2422 case EMFILE: 2423 return scnprintf(msg, size, "%s", 2424 "Too many events are opened.\n" 2425 "Probably the maximum number of open file descriptors has been reached.\n" 2426 "Hint: Try again after reducing the number of events.\n" 2427 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 2428 case ENOMEM: 2429 if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 && 2430 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0) 2431 return scnprintf(msg, size, 2432 "Not enough memory to setup event with callchain.\n" 2433 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" 2434 "Hint: Current value: %d", sysctl_perf_event_max_stack); 2435 break; 2436 case ENODEV: 2437 if (target->cpu_list) 2438 return scnprintf(msg, size, "%s", 2439 "No such device - did you specify an out-of-range profile CPU?"); 2440 break; 2441 case EOPNOTSUPP: 2442 if (evsel->attr.sample_period != 0) 2443 return scnprintf(msg, size, "%s", 2444 "PMU Hardware doesn't support sampling/overflow-interrupts."); 2445 if (evsel->attr.precise_ip) 2446 return scnprintf(msg, size, "%s", 2447 "\'precise\' request may not be supported. Try removing 'p' modifier."); 2448 #if defined(__i386__) || defined(__x86_64__) 2449 if (evsel->attr.type == PERF_TYPE_HARDWARE) 2450 return scnprintf(msg, size, "%s", 2451 "No hardware sampling interrupt available.\n" 2452 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); 2453 #endif 2454 break; 2455 case EBUSY: 2456 if (find_process("oprofiled")) 2457 return scnprintf(msg, size, 2458 "The PMU counters are busy/taken by another profiler.\n" 2459 "We found oprofile daemon running, please stop it and try again."); 2460 break; 2461 case EINVAL: 2462 if (evsel->attr.write_backward && perf_missing_features.write_backward) 2463 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel."); 2464 if (perf_missing_features.clockid) 2465 return scnprintf(msg, size, "clockid feature not supported."); 2466 if (perf_missing_features.clockid_wrong) 2467 return scnprintf(msg, size, "wrong clockid (%d).", clockid); 2468 break; 2469 default: 2470 break; 2471 } 2472 2473 return scnprintf(msg, size, 2474 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 2475 "/bin/dmesg may provide additional information.\n" 2476 "No CONFIG_PERF_EVENTS=y kernel support configured?", 2477 err, str_error_r(err, sbuf, sizeof(sbuf)), 2478 perf_evsel__name(evsel)); 2479 } 2480 2481 char *perf_evsel__env_arch(struct perf_evsel *evsel) 2482 { 2483 if (evsel && evsel->evlist && evsel->evlist->env) 2484 return evsel->evlist->env->arch; 2485 return NULL; 2486 } 2487