// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"

#include "util/counts.h"
#include "util/debug.h"
#include "util/dso.h"
#include <subcmd/exec-cmd.h>
#include "util/header.h"
#include <subcmd/parse-options.h>
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/map.h"
#include "util/srcline.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/evswitch.h"
#include "util/sort.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/color.h"
#include "util/string2.h"
#include "util/thread-stack.h"
#include "util/time-utils.h"
#include "util/path.h"
#include "util/event.h"
#include "ui/ui.h"
#include "print_binary.h"
#include "archinsn.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
#include "util/dump-insn.h"
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <subcmd/pager.h>
#include <perf/evlist.h>
#include <linux/err.h>
#include "util/record.h"
#include "util/util.h"
#include "perf.h"

#include <linux/ctype.h>

static char const *script_name;
static char const *generate_script_lang;
static bool reltime;
static bool deltatime;
static u64 initial_time;
static u64 previous_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;

unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;

enum perf_output_field {
	PERF_OUTPUT_COMM           = 1ULL << 0,
	PERF_OUTPUT_TID            = 1ULL << 1,
	PERF_OUTPUT_PID            = 1ULL << 2,
	PERF_OUTPUT_TIME           = 1ULL << 3,
	PERF_OUTPUT_CPU            = 1ULL << 4,
	PERF_OUTPUT_EVNAME         = 1ULL << 5,
	PERF_OUTPUT_TRACE          = 1ULL << 6,
	PERF_OUTPUT_IP             = 1ULL << 7,
	PERF_OUTPUT_SYM            = 1ULL << 8,
	PERF_OUTPUT_DSO            = 1ULL << 9,
	PERF_OUTPUT_ADDR           = 1ULL << 10,
	PERF_OUTPUT_SYMOFFSET      = 1ULL << 11,
	PERF_OUTPUT_SRCLINE        = 1ULL << 12,
	PERF_OUTPUT_PERIOD         = 1ULL << 13,
	PERF_OUTPUT_IREGS          = 1ULL << 14,
	PERF_OUTPUT_BRSTACK        = 1ULL << 15,
	PERF_OUTPUT_BRSTACKSYM     = 1ULL << 16,
	PERF_OUTPUT_DATA_SRC       = 1ULL << 17,
	PERF_OUTPUT_WEIGHT         = 1ULL << 18,
	PERF_OUTPUT_BPF_OUTPUT     = 1ULL << 19,
	PERF_OUTPUT_CALLINDENT     = 1ULL << 20,
	PERF_OUTPUT_INSN           = 1ULL << 21,
	PERF_OUTPUT_INSNLEN        = 1ULL << 22,
	PERF_OUTPUT_BRSTACKINSN    = 1ULL << 23,
	PERF_OUTPUT_BRSTACKOFF     = 1ULL << 24,
	PERF_OUTPUT_SYNTH          = 1ULL << 25,
	PERF_OUTPUT_PHYS_ADDR      = 1ULL << 26,
	PERF_OUTPUT_UREGS          = 1ULL << 27,
	PERF_OUTPUT_METRIC         = 1ULL << 28,
	PERF_OUTPUT_MISC           = 1ULL << 29,
	PERF_OUTPUT_SRCCODE        = 1ULL << 30,
	PERF_OUTPUT_IPC            = 1ULL << 31,
	PERF_OUTPUT_TOD            = 1ULL << 32,
	PERF_OUTPUT_DATA_PAGE_SIZE = 1ULL << 33,
};

struct perf_script {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			show_task_events;
	bool			show_mmap_events;
	bool			show_switch_events;
	bool			show_namespace_events;
	bool			show_lost_events;
	bool			show_round_events;
	bool			show_bpf_events;
	bool			show_cgroup_events;
	bool			show_text_poke_events;
	bool			allocated;
	bool			per_event_dump;
	bool			stitch_lbr;
	struct evswitch		evswitch;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	int			name_width;
	const char		*time_str;
	struct perf_time_interval *ptime_range;
	int			range_size;
	int			range_num;
};

struct output_option {
	const char *str;
	enum perf_output_field field;
} all_output_options[] = {
	{.str = "comm",  .field = PERF_OUTPUT_COMM},
	{.str = "tid",   .field = PERF_OUTPUT_TID},
	{.str = "pid",   .field = PERF_OUTPUT_PID},
	{.str = "time",  .field = PERF_OUTPUT_TIME},
	{.str = "cpu",   .field = PERF_OUTPUT_CPU},
	{.str = "event", .field = PERF_OUTPUT_EVNAME},
	{.str = "trace", .field = PERF_OUTPUT_TRACE},
	{.str = "ip",    .field = PERF_OUTPUT_IP},
	{.str = "sym",   .field = PERF_OUTPUT_SYM},
	{.str = "dso",   .field = PERF_OUTPUT_DSO},
	{.str = "addr",  .field = PERF_OUTPUT_ADDR},
	{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
	{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
	{.str = "period", .field = PERF_OUTPUT_PERIOD},
	{.str = "iregs", .field = PERF_OUTPUT_IREGS},
	{.str = "uregs", .field = PERF_OUTPUT_UREGS},
	{.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
	{.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
	{.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
	{.str = "weight",   .field = PERF_OUTPUT_WEIGHT},
	{.str = "bpf-output",   .field = PERF_OUTPUT_BPF_OUTPUT},
	{.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
	{.str = "insn", .field = PERF_OUTPUT_INSN},
	{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
	{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
	{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
	{.str = "synth", .field = PERF_OUTPUT_SYNTH},
	{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
	{.str = "metric", .field = PERF_OUTPUT_METRIC},
	{.str = "misc", .field = PERF_OUTPUT_MISC},
	{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
	{.str = "ipc", .field = PERF_OUTPUT_IPC},
	{.str = "tod", .field = PERF_OUTPUT_TOD},
	{.str = "data_page_size", .field = PERF_OUTPUT_DATA_PAGE_SIZE},
};

enum {
	OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
	OUTPUT_TYPE_OTHER,
	OUTPUT_TYPE_MAX
};

/* default set to maintain compatibility with current format */
static struct {
	bool user_set;
	bool wildcard_set;
	unsigned int print_ip_opts;
	u64 fields;
	u64 invalid_fields;
	u64 user_set_fields;
	u64 user_unset_fields;
} output[OUTPUT_TYPE_MAX] = {

	[PERF_TYPE_HARDWARE] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},

	[PERF_TYPE_SOFTWARE] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
			  PERF_OUTPUT_BPF_OUTPUT,

		.invalid_fields = PERF_OUTPUT_TRACE,
	},

	[PERF_TYPE_TRACEPOINT] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
	},

	[PERF_TYPE_HW_CACHE] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},

	[PERF_TYPE_RAW] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
			  PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
			  PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR |
			  PERF_OUTPUT_DATA_PAGE_SIZE,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},

	[PERF_TYPE_BREAKPOINT] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},

	[OUTPUT_TYPE_SYNTH] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},

	[OUTPUT_TYPE_OTHER] = {
		.user_set = false,

		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
			  PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
			  PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,

		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
	},
};

struct evsel_script {
	char *filename;
	FILE *fp;
	u64  samples;
	/* For metric output */
	u64  val;
	int  gnum;
};

static inline struct evsel_script *evsel_script(struct evsel *evsel)
{
	return (struct evsel_script *)evsel->priv;
}

static struct evsel_script *perf_evsel_script__new(struct evsel *evsel,
						   struct perf_data *data)
{
	struct evsel_script *es = zalloc(sizeof(*es));

	if (es != NULL) {
		if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
			goto out_free;
		es->fp = fopen(es->filename, "w");
		if (es->fp == NULL)
			goto out_free_filename;
	}

	return es;
out_free_filename:
	zfree(&es->filename);
out_free:
	free(es);
	return NULL;
}

static void perf_evsel_script__delete(struct evsel_script *es)
{
	zfree(&es->filename);
	fclose(es->fp);
	es->fp = NULL;
	free(es);
}

static int perf_evsel_script__fprintf(struct evsel_script *es, FILE *fp)
{
	struct stat st;

	fstat(fileno(es->fp), &st);
	return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
		       st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
}

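/*
 * Note: the output[] table above is indexed by "output type" rather than by
 * the raw perf_event_attr type: PERF_TYPE_SYNTH (which lies outside
 * PERF_TYPE_MAX) is folded into the OUTPUT_TYPE_SYNTH slot, and any type
 * without a dedicated entry falls back to OUTPUT_TYPE_OTHER.  The two
 * helpers below convert between the attr type and output type spaces.
 */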
static inline int output_type(unsigned int type)
{
	switch (type) {
	case PERF_TYPE_SYNTH:
		return OUTPUT_TYPE_SYNTH;
	default:
		if (type < PERF_TYPE_MAX)
			return type;
	}

	return OUTPUT_TYPE_OTHER;
}

static inline unsigned int attr_type(unsigned int type)
{
	switch (type) {
	case OUTPUT_TYPE_SYNTH:
		return PERF_TYPE_SYNTH;
	default:
		return type;
	}
}

static bool output_set_by_user(void)
{
	int j;
	for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
		if (output[j].user_set)
			return true;
	}
	return false;
}

static const char *output_field2str(enum perf_output_field field)
{
	int i, imax = ARRAY_SIZE(all_output_options);
	const char *str = "";

	for (i = 0; i < imax; ++i) {
		if (all_output_options[i].field == field) {
			str = all_output_options[i].str;
			break;
		}
	}
	return str;
}

#define PRINT_FIELD(x)  (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)

static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
				 enum perf_output_field field, bool allow_user_set)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int type = output_type(attr->type);
	const char *evname;

	if (attr->sample_type & sample_type)
		return 0;

	if (output[type].user_set_fields & field) {
		if (allow_user_set)
			return 0;
		evname = evsel__name(evsel);
		pr_err("Samples for '%s' event do not have %s attribute set. "
		       "Cannot print '%s' field.\n",
		       evname, sample_msg, output_field2str(field));
		return -1;
	}

	/* user did not ask for it explicitly so remove from the default list */
	output[type].fields &= ~field;
	evname = evsel__name(evsel);
	pr_debug("Samples for '%s' event do not have %s attribute set. "
		 "Skipping '%s' field.\n",
		 evname, sample_msg, output_field2str(field));

	return 0;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
			      enum perf_output_field field)
{
	return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
}

static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	bool allow_user_set;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	allow_user_set = perf_header__has_feat(&session->header,
					       HEADER_AUXTRACE);

	if (PRINT_FIELD(TRACE) &&
	    !perf_session__has_traces(session, "record -R"))
		return -EINVAL;

	if (PRINT_FIELD(IP)) {
		if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
			return -EINVAL;
	}

	if (PRINT_FIELD(ADDR) &&
	    evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
		return -EINVAL;

	if (PRINT_FIELD(DATA_SRC) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
		return -EINVAL;

	if (PRINT_FIELD(WEIGHT) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT", PERF_OUTPUT_WEIGHT))
		return -EINVAL;

	if (PRINT_FIELD(SYM) &&
	    !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
		pr_err("Display of symbols requested but neither sample IP nor "
		       "sample address\navailable. Hence, no addresses to convert "
		       "to symbols.\n");
		return -EINVAL;
	}
	if (PRINT_FIELD(SYMOFFSET) && !PRINT_FIELD(SYM)) {
		pr_err("Display of offsets requested but symbol is not "
		       "selected.\n");
		return -EINVAL;
	}
	if (PRINT_FIELD(DSO) &&
	    !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
		pr_err("Display of DSO requested but no address to convert.\n");
		return -EINVAL;
	}
	if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
		pr_err("Display of source line number requested but sample IP is not\n"
		       "selected. Hence, no address to lookup the source line number.\n");
		return -EINVAL;
	}
	if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
	    !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
		pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
		       "Hint: run 'perf record -b ...'\n");
		return -EINVAL;
	}
	if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
		return -EINVAL;

	if (PRINT_FIELD(TIME) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
		return -EINVAL;

	if (PRINT_FIELD(CPU) &&
	    evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
		return -EINVAL;

	if (PRINT_FIELD(IREGS) &&
	    evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
		return -EINVAL;

	if (PRINT_FIELD(UREGS) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
		return -EINVAL;

	if (PRINT_FIELD(PHYS_ADDR) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR))
		return -EINVAL;

	if (PRINT_FIELD(DATA_PAGE_SIZE) &&
	    evsel__check_stype(evsel, PERF_SAMPLE_DATA_PAGE_SIZE, "DATA_PAGE_SIZE", PERF_OUTPUT_DATA_PAGE_SIZE))
		return -EINVAL;

	return 0;
}

static void set_print_ip_opts(struct perf_event_attr *attr)
{
	unsigned int type = output_type(attr->type);

	output[type].print_ip_opts = 0;
	if (PRINT_FIELD(IP))
		output[type].print_ip_opts |= EVSEL__PRINT_IP;

	if (PRINT_FIELD(SYM))
		output[type].print_ip_opts |= EVSEL__PRINT_SYM;

	if (PRINT_FIELD(DSO))
		output[type].print_ip_opts |= EVSEL__PRINT_DSO;

	if (PRINT_FIELD(SYMOFFSET))
		output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;

	if (PRINT_FIELD(SRCLINE))
		output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}

/*
 * verify all user requested events exist and the samples
 * have the expected data
 */
static int perf_session__check_output_opt(struct perf_session *session)
{
	bool tod = false;
	unsigned int j;
	struct evsel *evsel;

	for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
		evsel = perf_session__find_first_evtype(session, attr_type(j));

		/*
		 * even if fields is set to 0 (ie., show nothing) event must
		 * exist if user explicitly includes it on the command line
		 */
		if (!evsel && output[j].user_set && !output[j].wildcard_set &&
		    j != OUTPUT_TYPE_SYNTH) {
			pr_err("%s events do not exist. "
			       "Remove corresponding -F option to proceed.\n",
			       event_type(j));
			return -1;
		}

		if (evsel && output[j].fields &&
		    evsel__check_attr(evsel, session))
			return -1;

		if (evsel == NULL)
			continue;

		set_print_ip_opts(&evsel->core.attr);
		tod |= output[j].fields & PERF_OUTPUT_TOD;
	}

	if (!no_callchain) {
		bool use_callchain = false;
		bool not_pipe = false;

		evlist__for_each_entry(session->evlist, evsel) {
			not_pipe = true;
			if (evsel__has_callchain(evsel)) {
				use_callchain = true;
				break;
			}
		}
		if (not_pipe && !use_callchain)
			symbol_conf.use_callchain = false;
	}

	/*
	 * set default for tracepoints to print symbols only
	 * if callchains are present
	 */
	if (symbol_conf.use_callchain &&
	    !output[PERF_TYPE_TRACEPOINT].user_set) {
		j = PERF_TYPE_TRACEPOINT;

		evlist__for_each_entry(session->evlist, evsel) {
			if (evsel->core.attr.type != j)
				continue;

			if (evsel__has_callchain(evsel)) {
				output[j].fields |= PERF_OUTPUT_IP;
				output[j].fields |= PERF_OUTPUT_SYM;
				output[j].fields |= PERF_OUTPUT_SYMOFFSET;
				output[j].fields |= PERF_OUTPUT_DSO;
				set_print_ip_opts(&evsel->core.attr);
				goto out;
			}
		}
	}

	if (tod && !session->header.env.clock.enabled) {
		pr_err("Can't provide 'tod' time, missing clock data. "
		       "Please record with -k/--clockid option.\n");
		return -1;
	}
out:
	return 0;
}

static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
				     FILE *fp)
{
	unsigned i = 0, r;
	int printed = 0;

	if (!regs || !regs->regs)
		return 0;

	printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);

	for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs->regs[i++];
		printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
	}

	return printed;
}

#define DEFAULT_TOD_FMT "%F %H:%M:%S"

static char*
tod_scnprintf(struct perf_script *script, char *buf, int buflen,
	      u64 timestamp)
{
	u64 tod_ns, clockid_ns;
	struct perf_env *env;
	unsigned long nsec;
	struct tm ltime;
	char date[64];
	time_t sec;

	buf[0] = '\0';
	if (buflen < 64 || !script)
		return buf;

	env = &script->session->header.env;
	if (!env->clock.enabled) {
		scnprintf(buf, buflen, "disabled");
		return buf;
	}

	clockid_ns = env->clock.clockid_ns;
	tod_ns = env->clock.tod_ns;

	if (timestamp > clockid_ns)
		tod_ns += timestamp - clockid_ns;
	else
		tod_ns -= clockid_ns - timestamp;

	sec = (time_t) (tod_ns / NSEC_PER_SEC);
	nsec = tod_ns - sec * NSEC_PER_SEC;

	if (localtime_r(&sec, &ltime) == NULL) {
		scnprintf(buf, buflen, "failed");
	} else {
		strftime(date, sizeof(date), DEFAULT_TOD_FMT, &ltime);

		if (symbol_conf.nanosecs) {
			snprintf(buf, buflen, "%s.%09lu", date, nsec);
		} else {
			snprintf(buf, buflen, "%s.%06lu",
				 date, nsec / NSEC_PER_USEC);
		}
	}

	return buf;
}

static int perf_sample__fprintf_iregs(struct perf_sample *sample,
				      struct perf_event_attr *attr, FILE *fp)
{
	return perf_sample__fprintf_regs(&sample->intr_regs,
					 attr->sample_regs_intr, fp);
}

static int perf_sample__fprintf_uregs(struct perf_sample *sample,
				      struct perf_event_attr *attr, FILE *fp)
{
	return perf_sample__fprintf_regs(&sample->user_regs,
					 attr->sample_regs_user, fp);
}

static int perf_sample__fprintf_start(struct perf_script *script,
				      struct perf_sample *sample,
				      struct thread *thread,
				      struct evsel *evsel,
				      u32 type, FILE *fp)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	unsigned long secs;
	unsigned long long nsecs;
	int printed = 0;
	char tstr[128];

	if (PRINT_FIELD(COMM)) {
		const char *comm = thread ? thread__comm_str(thread) : ":-1";

		if (latency_format)
			printed += fprintf(fp, "%8.8s ", comm);
		else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
			printed += fprintf(fp, "%s ", comm);
		else
			printed += fprintf(fp, "%16s ", comm);
	}

	if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
		printed += fprintf(fp, "%5d/%-5d ", sample->pid, sample->tid);
	else if (PRINT_FIELD(PID))
		printed += fprintf(fp, "%5d ", sample->pid);
	else if (PRINT_FIELD(TID))
		printed += fprintf(fp, "%5d ", sample->tid);

	if (PRINT_FIELD(CPU)) {
		if (latency_format)
			printed += fprintf(fp, "%3d ", sample->cpu);
		else
			printed += fprintf(fp, "[%03d] ", sample->cpu);
	}

	if (PRINT_FIELD(MISC)) {
		int ret = 0;

#define has(m) \
	(sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m

		if (has(KERNEL))
			ret += fprintf(fp, "K");
		if (has(USER))
			ret += fprintf(fp, "U");
		if (has(HYPERVISOR))
			ret += fprintf(fp, "H");
		if (has(GUEST_KERNEL))
			ret += fprintf(fp, "G");
		if (has(GUEST_USER))
			ret += fprintf(fp, "g");

		switch (type) {
		case PERF_RECORD_MMAP:
		case PERF_RECORD_MMAP2:
			if (has(MMAP_DATA))
				ret += fprintf(fp, "M");
			break;
		case PERF_RECORD_COMM:
			if (has(COMM_EXEC))
				ret += fprintf(fp, "E");
			break;
		case PERF_RECORD_SWITCH:
		case PERF_RECORD_SWITCH_CPU_WIDE:
			if (has(SWITCH_OUT)) {
				ret += fprintf(fp, "S");
				if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
					ret += fprintf(fp, "p");
			}
		default:
			break;
		}

#undef has

		ret += fprintf(fp, "%*s", 6 - ret, " ");
		printed += ret;
	}

	if (PRINT_FIELD(TOD)) {
		tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
		printed += fprintf(fp, "%s ", tstr);
	}

	if (PRINT_FIELD(TIME)) {
		u64 t = sample->time;
		if (reltime) {
			if (!initial_time)
				initial_time = sample->time;
			t = sample->time - initial_time;
		} else if (deltatime) {
			if (previous_time)
				t = sample->time - previous_time;
			else {
				t = 0;
			}
			previous_time = sample->time;
		}
		nsecs = t;
		secs = nsecs / NSEC_PER_SEC;
		nsecs -= secs * NSEC_PER_SEC;

		if (symbol_conf.nanosecs)
			printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
		else {
			char sample_time[32];
			timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
			printed += fprintf(fp, "%12s: ", sample_time);
		}
	}

	return printed;
}

static inline char
mispred_str(struct branch_entry *br)
{
	if (!(br->flags.mispred || br->flags.predicted))
		return '-';

	return br->flags.predicted ? 'P' : 'M';
}

static int perf_sample__fprintf_brstack(struct perf_sample *sample,
					struct thread *thread,
					struct perf_event_attr *attr, FILE *fp)
{
	struct branch_stack *br = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct addr_location alf, alt;
	u64 i, from, to;
	int printed = 0;

	if (!(br && br->nr))
		return 0;

	for (i = 0; i < br->nr; i++) {
		from = entries[i].from;
		to   = entries[i].to;

		if (PRINT_FIELD(DSO)) {
			memset(&alf, 0, sizeof(alf));
			memset(&alt, 0, sizeof(alt));
			thread__find_map_fb(thread, sample->cpumode, from, &alf);
			thread__find_map_fb(thread, sample->cpumode, to, &alt);
		}

		printed += fprintf(fp, " 0x%"PRIx64, from);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alf.map, fp);
			printed += fprintf(fp, ")");
		}

		printed += fprintf(fp, "/0x%"PRIx64, to);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alt.map, fp);
			printed += fprintf(fp, ")");
		}

		printed += fprintf(fp, "/%c/%c/%c/%d ",
				   mispred_str(entries + i),
				   entries[i].flags.in_tx ? 'X' : '-',
				   entries[i].flags.abort ? 'A' : '-',
				   entries[i].flags.cycles);
	}

	return printed;
}

static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
					   struct thread *thread,
					   struct perf_event_attr *attr, FILE *fp)
{
	struct branch_stack *br = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct addr_location alf, alt;
	u64 i, from, to;
	int printed = 0;

	if (!(br && br->nr))
		return 0;

	for (i = 0; i < br->nr; i++) {

		memset(&alf, 0, sizeof(alf));
		memset(&alt, 0, sizeof(alt));
		from = entries[i].from;
		to   = entries[i].to;

		thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
		thread__find_symbol_fb(thread, sample->cpumode, to, &alt);

		printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alf.map, fp);
			printed += fprintf(fp, ")");
		}
		printed += fprintf(fp, "%c", '/');
		printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alt.map, fp);
			printed += fprintf(fp, ")");
		}
		printed += fprintf(fp, "/%c/%c/%c/%d ",
				   mispred_str(entries + i),
				   entries[i].flags.in_tx ? 'X' : '-',
				   entries[i].flags.abort ? 'A' : '-',
				   entries[i].flags.cycles);
	}

	return printed;
}

static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
					   struct thread *thread,
					   struct perf_event_attr *attr, FILE *fp)
{
	struct branch_stack *br = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct addr_location alf, alt;
	u64 i, from, to;
	int printed = 0;

	if (!(br && br->nr))
		return 0;

	for (i = 0; i < br->nr; i++) {

		memset(&alf, 0, sizeof(alf));
		memset(&alt, 0, sizeof(alt));
		from = entries[i].from;
		to   = entries[i].to;

		if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
		    !alf.map->dso->adjust_symbols)
			from = map__map_ip(alf.map, from);

		if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
		    !alt.map->dso->adjust_symbols)
			to = map__map_ip(alt.map, to);

		printed += fprintf(fp, " 0x%"PRIx64, from);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alf.map, fp);
			printed += fprintf(fp, ")");
		}
		printed += fprintf(fp, "/0x%"PRIx64, to);
		if (PRINT_FIELD(DSO)) {
			printed += fprintf(fp, "(");
			printed += map__fprintf_dsoname(alt.map, fp);
			printed += fprintf(fp, ")");
		}
		printed += fprintf(fp, "/%c/%c/%c/%d ",
				   mispred_str(entries + i),
				   entries[i].flags.in_tx ? 'X' : '-',
				   entries[i].flags.abort ? 'A' : '-',
				   entries[i].flags.cycles);
	}

	return printed;
}
#define MAXBB 16384UL

static int grab_bb(u8 *buffer, u64 start, u64 end,
		   struct machine *machine, struct thread *thread,
		   bool *is64bit, u8 *cpumode, bool last)
{
	long offset, len;
	struct addr_location al;
	bool kernel;

	if (!start || !end)
		return 0;

	kernel = machine__kernel_ip(machine, start);
	if (kernel)
		*cpumode = PERF_RECORD_MISC_KERNEL;
	else
		*cpumode = PERF_RECORD_MISC_USER;

	/*
	 * Block overlaps between kernel and user.
	 * This can happen due to ring filtering
	 * On Intel CPUs the entry into the kernel is filtered,
	 * but the exit is not. Let the caller patch it up.
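	 * (The -ENXIO return below is the signal for the caller to merge
	 * this block with the next entry's range.)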
	 */
	if (kernel != machine__kernel_ip(machine, end)) {
		pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
		return -ENXIO;
	}

	memset(&al, 0, sizeof(al));
	if (end - start > MAXBB - MAXINSN) {
		if (last)
			pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
		else
			pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
		return 0;
	}

	if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) {
		pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
		return 0;
	}
	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) {
		pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
		return 0;
	}

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	offset = al.map->map_ip(al.map, start);
	len = dso__data_read_offset(al.map->dso, machine, offset, (u8 *)buffer,
				    end - start + MAXINSN);

	*is64bit = al.map->dso->is_64_bit;
	if (len <= 0)
		pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
			 start, end);
	return len;
}

static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
{
	char *srcfile;
	int ret = 0;
	unsigned line;
	int len;
	char *srccode;

	if (!map || !map->dso)
		return 0;
	srcfile = get_srcline_split(map->dso,
				    map__rip_2objdump(map, addr),
				    &line);
	if (!srcfile)
		return 0;

	/* Avoid redundant printing */
	if (state &&
	    state->srcfile &&
	    !strcmp(state->srcfile, srcfile) &&
	    state->line == line) {
		free(srcfile);
		return 0;
	}

	srccode = find_sourceline(srcfile, line, &len);
	if (!srccode)
		goto out_free_line;

	ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);

	if (state) {
		state->srcfile = srcfile;
		state->line = line;
	}
	return ret;

out_free_line:
	free(srcfile);
	return ret;
}

static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
{
	struct addr_location al;
	int ret = 0;

	memset(&al, 0, sizeof(al));
	thread__find_map(thread, cpumode, addr, &al);
	if (!al.map)
		return 0;
	ret = map__fprintf_srccode(al.map, al.addr, stdout,
				   &thread->srccode_state);
	if (ret)
		ret += printf("\n");
	return ret;
}

static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
			    struct perf_insn *x, u8 *inbuf, int len,
			    int insn, FILE *fp, int *total_cycles)
{
	int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
			      dump_insn(x, ip, inbuf, len, NULL),
			      en->flags.predicted ? " PRED" : "",
			      en->flags.mispred ? " MISPRED" : "",
			      en->flags.in_tx ? " INTX" : "",
			      en->flags.abort ? " ABORT" : "");
	if (en->flags.cycles) {
		*total_cycles += en->flags.cycles;
		printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles);
		if (insn)
			printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
	}
	return printed + fprintf(fp, "\n");
}

static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
			   u8 cpumode, int cpu, struct symbol **lastsym,
			   struct perf_event_attr *attr, FILE *fp)
{
	struct addr_location al;
	int off, printed = 0;

	memset(&al, 0, sizeof(al));

	thread__find_map(thread, cpumode, addr, &al);

	if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
		return 0;

	al.cpu = cpu;
	al.sym = NULL;
	if (al.map)
		al.sym = map__find_symbol(al.map, al.addr);

	if (!al.sym)
		return 0;

	if (al.addr < al.sym->end)
		off = al.addr - al.sym->start;
	else
		off = al.addr - al.map->start - al.sym->start;
	printed += fprintf(fp, "\t%s", al.sym->name);
	if (off)
		printed += fprintf(fp, "%+d", off);
	printed += fprintf(fp, ":");
	if (PRINT_FIELD(SRCLINE))
		printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
	printed += fprintf(fp, "\n");
	*lastsym = al.sym;

	return printed;
}

static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
					    struct thread *thread,
					    struct perf_event_attr *attr,
					    struct machine *machine, FILE *fp)
{
	struct branch_stack *br = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 start, end;
	int i, insn, len, nr, ilen, printed = 0;
	struct perf_insn x;
	u8 buffer[MAXBB];
	unsigned off;
	struct symbol *lastsym = NULL;
	int total_cycles = 0;

	if (!(br && br->nr))
		return 0;
	nr = br->nr;
	if (max_blocks && nr > max_blocks + 1)
		nr = max_blocks + 1;

	x.thread = thread;
	x.cpu = sample->cpu;

	printed += fprintf(fp, "%c", '\n');

	/* Handle first from jump, of which we don't know the entry. */
	len = grab_bb(buffer, entries[nr-1].from,
		      entries[nr-1].from,
		      machine, thread, &x.is64bit, &x.cpumode, false);
	if (len > 0) {
		printed += ip__fprintf_sym(entries[nr - 1].from, thread,
					   x.cpumode, x.cpu, &lastsym, attr, fp);
		printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
					    &x, buffer, len, 0, fp, &total_cycles);
		if (PRINT_FIELD(SRCCODE))
			printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
	}

	/* Print all blocks */
	for (i = nr - 2; i >= 0; i--) {
		if (entries[i].from || entries[i].to)
			pr_debug("%d: %" PRIx64 "-%" PRIx64 "\n", i,
				 entries[i].from,
				 entries[i].to);
		start = entries[i + 1].to;
		end   = entries[i].from;

		len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
		/* Patch up missing kernel transfers due to ring filters */
		if (len == -ENXIO && i > 0) {
			end = entries[--i].from;
			pr_debug("\tpatching up to %" PRIx64 "-%" PRIx64 "\n", start, end);
			len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
		}
		if (len <= 0)
			continue;

		insn = 0;
		for (off = 0; off < (unsigned)len; off += ilen) {
			uint64_t ip = start + off;

			printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
			if (ip == end) {
				printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
							    &total_cycles);
				if (PRINT_FIELD(SRCCODE))
					printed += print_srccode(thread, x.cpumode, ip);
				break;
			} else {
				ilen = 0;
				printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
						   dump_insn(&x, ip, buffer + off, len - off, &ilen));
				if (ilen == 0)
					break;
				if (PRINT_FIELD(SRCCODE))
					print_srccode(thread, x.cpumode, ip);
				insn++;
			}
		}
		if (off != end - start)
			printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
	}

	/*
	 * Hit the branch? In this case we are already done, and the target
	 * has not been executed yet.
	 */
	if (entries[0].from == sample->ip)
		goto out;
	if (entries[0].flags.abort)
		goto out;

	/*
	 * Print final block upto sample
	 *
	 * Due to pipeline delays the LBRs might be missing a branch
	 * or two, which can result in very large or negative blocks
	 * between final branch and sample. When this happens just
	 * continue walking after the last TO until we hit a branch.
	 */
	start = entries[0].to;
	end = sample->ip;
	if (end < start) {
		/* Missing jump. Scan 128 bytes for the next branch */
		end = start + 128;
	}
	len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
	printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
	if (len <= 0) {
		/* Print at least last IP if basic block did not work */
		len = grab_bb(buffer, sample->ip, sample->ip,
			      machine, thread, &x.is64bit, &x.cpumode, false);
		if (len <= 0)
			goto out;
		printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
				   dump_insn(&x, sample->ip, buffer, len, NULL));
		if (PRINT_FIELD(SRCCODE))
			print_srccode(thread, x.cpumode, sample->ip);
		goto out;
	}
	for (off = 0; off <= end - start; off += ilen) {
		ilen = 0;
		printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
				   dump_insn(&x, start + off, buffer + off, len - off, &ilen));
		if (ilen == 0)
			break;
		if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
			/*
			 * Hit a missing branch. Just stop.
			 */
			printed += fprintf(fp, "\t... not reaching sample ...\n");
			break;
		}
		if (PRINT_FIELD(SRCCODE))
			print_srccode(thread, x.cpumode, start + off);
	}
out:
	return printed;
}

static int perf_sample__fprintf_addr(struct perf_sample *sample,
				     struct thread *thread,
				     struct perf_event_attr *attr, FILE *fp)
{
	struct addr_location al;
	int printed = fprintf(fp, "%16" PRIx64, sample->addr);

	if (!sample_addr_correlates_sym(attr))
		goto out;

	thread__resolve(thread, &al, sample);

	if (PRINT_FIELD(SYM)) {
		printed += fprintf(fp, " ");
		if (PRINT_FIELD(SYMOFFSET))
			printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
		else
			printed += symbol__fprintf_symname(al.sym, fp);
	}

	if (PRINT_FIELD(DSO)) {
		printed += fprintf(fp, " (");
		printed += map__fprintf_dsoname(al.map, fp);
		printed += fprintf(fp, ")");
	}
out:
	return printed;
}

static const char *resolve_branch_sym(struct perf_sample *sample,
				      struct evsel *evsel,
				      struct thread *thread,
				      struct addr_location *al,
				      u64 *ip)
{
	struct addr_location addr_al;
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = NULL;

	if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
		if (sample_addr_correlates_sym(attr)) {
			thread__resolve(thread, &addr_al, sample);
			if (addr_al.sym)
				name = addr_al.sym->name;
			else
				*ip = sample->addr;
		} else {
			*ip = sample->addr;
		}
	} else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
		if (al->sym)
			name = al->sym->name;
		else
			*ip = sample->ip;
	}
	return name;
}

static int perf_sample__fprintf_callindent(struct perf_sample *sample,
					   struct evsel *evsel,
					   struct thread *thread,
					   struct addr_location *al, FILE *fp)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	size_t depth = thread_stack__depth(thread, sample->cpu);
	const char *name = NULL;
	static int spacing;
	int len = 0;
	int dlen = 0;
	u64 ip = 0;

	/*
	 * The 'return' has already been popped off the stack so the depth has
	 * to be adjusted to match the 'call'.
	 */
	if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
		depth += 1;

	name = resolve_branch_sym(sample, evsel, thread, al, &ip);

	if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
		dlen += fprintf(fp, "(");
		dlen += map__fprintf_dsoname(al->map, fp);
		dlen += fprintf(fp, ")\t");
	}

	if (name)
		len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
	else if (ip)
		len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);

	if (len < 0)
		return len;

	/*
	 * Try to keep the output length from changing frequently so that the
	 * output lines up more nicely.
	 */
	if (len > spacing || (len && len < spacing - 52))
		spacing = round_up(len + 4, 32);

	if (len < spacing)
		len += fprintf(fp, "%*s", spacing - len, "");

	return len + dlen;
}

__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
			    struct thread *thread __maybe_unused,
			    struct machine *machine __maybe_unused)
{
}

static int perf_sample__fprintf_insn(struct perf_sample *sample,
				     struct perf_event_attr *attr,
				     struct thread *thread,
				     struct machine *machine, FILE *fp)
{
	int printed = 0;

	if (sample->insn_len == 0 && native_arch)
		arch_fetch_insn(sample, thread, machine);

	if (PRINT_FIELD(INSNLEN))
		printed += fprintf(fp, " ilen: %d", sample->insn_len);
	if (PRINT_FIELD(INSN) && sample->insn_len) {
		int i;

		printed += fprintf(fp, " insn:");
		for (i = 0; i < sample->insn_len; i++)
			printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
	}
	if (PRINT_FIELD(BRSTACKINSN))
		printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);

	return printed;
}

static int perf_sample__fprintf_ipc(struct perf_sample *sample,
				    struct perf_event_attr *attr, FILE *fp)
{
	unsigned int ipc;

	if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
		return 0;

	ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;

	return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
		       ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
}

static int perf_sample__fprintf_bts(struct perf_sample *sample,
				    struct evsel *evsel,
				    struct thread *thread,
				    struct addr_location *al,
				    struct machine *machine, FILE *fp)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	unsigned int type = output_type(attr->type);
	bool print_srcline_last = false;
	int printed = 0;

	if (PRINT_FIELD(CALLINDENT))
		printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);

	/* print branch_from information */
	if (PRINT_FIELD(IP)) {
		unsigned int print_opts = output[type].print_ip_opts;
		struct callchain_cursor *cursor = NULL;

		if (symbol_conf.use_callchain && sample->callchain &&
		    thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
					      sample, NULL, NULL, scripting_max_stack) == 0)
			cursor = &callchain_cursor;

		if (cursor == NULL) {
			printed += fprintf(fp, " ");
			if (print_opts & EVSEL__PRINT_SRCLINE) {
				print_srcline_last = true;
				print_opts &= ~EVSEL__PRINT_SRCLINE;
			}
		} else
			printed += fprintf(fp, "\n");

		printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
					       symbol_conf.bt_stop_list, fp);
	}

	/* print branch_to information */
	if (PRINT_FIELD(ADDR) ||
	    ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
	     !output[type].user_set)) {
		printed += fprintf(fp, " => ");
		printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
	}

	printed += perf_sample__fprintf_ipc(sample, attr, fp);

	if (print_srcline_last)
		printed += map__fprintf_srcline(al->map, al->addr, "\n  ", fp);

	printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
	printed += fprintf(fp, "\n");
	if (PRINT_FIELD(SRCCODE)) {
		int ret = map__fprintf_srccode(al->map, al->addr, stdout,
					       &thread->srccode_state);
		if (ret) {
			printed += ret;
			printed += printf("\n");
		}
	}
	return printed;
}

static struct {
	u32 flags;
	const char *name;
} sample_flags[] = {
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "jcc"},
	{PERF_IP_FLAG_BRANCH, "jmp"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, "int"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, "iret"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, "syscall"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, "sysret"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "async"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT, "hw int"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
	{0, NULL}
};

static const char *sample_flags_to_name(u32 flags)
{
	int i;

	for (i = 0; sample_flags[i].name ; i++) {
		if (sample_flags[i].flags == flags)
			return sample_flags[i].name;
	}

	return NULL;
}

static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{
	const char *chars = PERF_IP_FLAG_CHARS;
	const int n = strlen(PERF_IP_FLAG_CHARS);
	bool in_tx = flags & PERF_IP_FLAG_IN_TX;
	const char *name = NULL;
	char str[33];
	int i, pos = 0;

	name = sample_flags_to_name(flags & ~PERF_IP_FLAG_IN_TX);
	if (name)
		return fprintf(fp, " %-15s%4s ", name, in_tx ? "(x)" : "");

	if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_BEGIN));
		if (name)
			return fprintf(fp, " tr strt %-7s%4s ", name, in_tx ? "(x)" : "");
	}

	if (flags & PERF_IP_FLAG_TRACE_END) {
		name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_END));
		if (name)
			return fprintf(fp, " tr end  %-7s%4s ", name, in_tx ? "(x)" : "");
	}

	for (i = 0; i < n; i++, flags >>= 1) {
		if (flags & 1)
			str[pos++] = chars[i];
	}
	for (; i < 32; i++, flags >>= 1) {
		if (flags & 1)
			str[pos++] = '?';
	}
	str[pos] = 0;

	return fprintf(fp, " %-19s ", str);
}

struct printer_data {
	int line_no;
	bool hit_nul;
	bool is_printable;
};

static int sample__fprintf_bpf_output(enum binary_printer_ops op,
				      unsigned int val,
				      void *extra, FILE *fp)
{
	unsigned char ch = (unsigned char)val;
	struct printer_data *printer_data = extra;
	int printed = 0;

	switch (op) {
	case BINARY_PRINT_DATA_BEGIN:
		printed += fprintf(fp, "\n");
		break;
	case BINARY_PRINT_LINE_BEGIN:
		printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
						"           ");
		break;
	case BINARY_PRINT_ADDR:
		printed += fprintf(fp, " %04x:", val);
		break;
	case BINARY_PRINT_NUM_DATA:
		printed += fprintf(fp, " %02x", val);
		break;
	case BINARY_PRINT_NUM_PAD:
		printed += fprintf(fp, "   ");
		break;
	case BINARY_PRINT_SEP:
		printed += fprintf(fp, " ");
		break;
	case BINARY_PRINT_CHAR_DATA:
		if (printer_data->hit_nul && ch)
			printer_data->is_printable = false;

		if (!isprint(ch)) {
			printed += fprintf(fp, "%c", '.');

			if (!printer_data->is_printable)
				break;

			if (ch == '\0')
				printer_data->hit_nul = true;
			else
				printer_data->is_printable = false;
		} else {
			printed += fprintf(fp, "%c", ch);
		}
		break;
	case BINARY_PRINT_CHAR_PAD:
		printed += fprintf(fp, " ");
		break;
	case BINARY_PRINT_LINE_END:
		printed += fprintf(fp, "\n");
		printer_data->line_no++;
		break;
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return printed;
}

static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
{
	unsigned int nr_bytes = sample->raw_size;
	struct printer_data printer_data = {0, false, true};
	int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
				      sample__fprintf_bpf_output, &printer_data, fp);

	if (printer_data.is_printable && printer_data.hit_nul)
		printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));

	return printed;
}

static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
{
	if (len > 0 && len < spacing)
		return fprintf(fp, "%*s", spacing - len, "");

	return 0;
}

static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
{
	return perf_sample__fprintf_spacing(len, 34, fp);
}

static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
		      data->ip, le64_to_cpu(data->payload));
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	len = fprintf(fp, " hints: %#x extensions: %#x ",
		      data->hints, data->extensions);
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
		      data->hw, data->cstate, data->subcstate);
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	len = fprintf(fp, " IP: %u ", data->ip);
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
		      data->deepest_cstate, data->last_cstate,
		      data->wake_reason);
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
{
	struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
	unsigned int percent, freq;
	int len;

	if (perf_sample__bad_synth_size(sample, *data))
		return 0;

	freq = (le32_to_cpu(data->freq) + 500) / 1000;
	len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
	if (data->max_nonturbo) {
		percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
		len += fprintf(fp, "(%3u%%) ", percent);
	}
	return len + perf_sample__fprintf_pt_spacing(len, fp);
}

static int perf_sample__fprintf_synth(struct perf_sample *sample,
				      struct evsel *evsel, FILE *fp)
{
	switch (evsel->core.attr.config) {
	case PERF_SYNTH_INTEL_PTWRITE:
		return perf_sample__fprintf_synth_ptwrite(sample, fp);
	case PERF_SYNTH_INTEL_MWAIT:
		return perf_sample__fprintf_synth_mwait(sample, fp);
	case PERF_SYNTH_INTEL_PWRE:
		return perf_sample__fprintf_synth_pwre(sample, fp);
	case PERF_SYNTH_INTEL_EXSTOP:
		return perf_sample__fprintf_synth_exstop(sample, fp);
	case PERF_SYNTH_INTEL_PWRX:
		return perf_sample__fprintf_synth_pwrx(sample, fp);
	case PERF_SYNTH_INTEL_CBR:
		return perf_sample__fprintf_synth_cbr(sample, fp);
	default:
		break;
	}

	return 0;
}

static int evlist__max_name_len(struct evlist *evlist)
{
	struct evsel *evsel;
	int max = 0;

	evlist__for_each_entry(evlist, evsel) {
		int len = strlen(evsel__name(evsel));

		max = MAX(len, max);
	}

	return max;
}

static int data_src__fprintf(u64 data_src, FILE *fp)
{
	struct mem_info mi = { .data_src.val = data_src };
	char decode[100];
	char out[100];
	static int maxlen;
	int len;

	perf_script__meminfo_scnprintf(decode, 100, &mi);

	len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode);
	if (maxlen < len)
		maxlen = len;

	return fprintf(fp, "%-*s", maxlen, out);
}

struct metric_ctx {
	struct perf_sample	*sample;
	struct thread		*thread;
	struct evsel		*evsel;
	FILE			*fp;
};

static void script_print_metric(struct perf_stat_config *config __maybe_unused,
				void *ctx, const char *color,
				const char *fmt,
				const char *unit, double val)
{
	struct metric_ctx *mctx = ctx;

	if (!fmt)
		return;
	perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
				   PERF_RECORD_SAMPLE, mctx->fp);
	fputs("\tmetric: ", mctx->fp);
	if (color)
		color_fprintf(mctx->fp, color, fmt, val);
	else
		printf(fmt, val);
	fprintf(mctx->fp, " %s\n", unit);
}

static void script_new_line(struct perf_stat_config *config __maybe_unused,
			    void *ctx)
{
	struct metric_ctx *mctx = ctx;

	perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
				   PERF_RECORD_SAMPLE, mctx->fp);
	fputs("\tmetric: ", mctx->fp);
}

static void perf_sample__fprint_metric(struct perf_script *script,
				       struct thread *thread,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       FILE *fp)
{
	struct perf_stat_output_ctx ctx = {
		.print_metric = script_print_metric,
		.new_line = script_new_line,
		.ctx = &(struct metric_ctx) {
				.sample = sample,
				.thread = thread,
				.evsel  = evsel,
				.fp     = fp,
			 },
		.force_header = false,
	};
	struct evsel *ev2;
	u64 val;

	if (!evsel->stats)
		evlist__alloc_stats(script->session->evlist, false);
	if (evsel_script(evsel->leader)->gnum++ == 0)
		perf_stat__reset_shadow_stats();
	val = sample->period * evsel->scale;
	perf_stat__update_shadow_stats(evsel,
				       val,
				       sample->cpu,
				       &rt_stat);
	evsel_script(evsel)->val = val;
	if (evsel_script(evsel->leader)->gnum == evsel->leader->core.nr_members) {
		for_each_group_member (ev2, evsel->leader) {
			perf_stat__print_shadow_stats(&stat_config, ev2,
						      evsel_script(ev2)->val,
						      sample->cpu,
						      &ctx,
						      NULL,
						      &rt_stat);
		}
		evsel_script(evsel->leader)->gnum = 0;
	}
}

static bool show_event(struct perf_sample *sample,
		       struct evsel *evsel,
		       struct thread *thread,
		       struct addr_location *al)
{
	int depth = thread_stack__depth(thread, sample->cpu);

	if (!symbol_conf.graph_function)
		return true;

	if (thread->filter) {
		if (depth <= thread->filter_entry_depth) {
			thread->filter = false;
			return false;
		}
		return true;
	} else {
		const char *s = symbol_conf.graph_function;
		u64 ip;
		const char *name = resolve_branch_sym(sample, evsel, thread, al,
				&ip);
		unsigned nlen;

		if (!name)
			return false;
		nlen = strlen(name);
		while (*s) {
			unsigned len = strcspn(s, ",");
			if (nlen == len && !strncmp(name, s, len)) {
				thread->filter = true;
				thread->filter_entry_depth = depth;
				return true;
			}
			s += len;
			if (*s == ',')
				s++;
		}
		return false;
	}
}

static void process_event(struct perf_script *script,
			  struct perf_sample *sample, struct evsel *evsel,
			  struct addr_location *al,
			  struct machine *machine)
{
	struct thread *thread = al->thread;
	struct perf_event_attr *attr = &evsel->core.attr;
	unsigned int type = output_type(attr->type);
	struct evsel_script *es = evsel->priv;
	FILE *fp = es->fp;
	char str[PAGE_SIZE_NAME_LEN];

(output[type].fields == 0) 1950 return; 1951 1952 if (!show_event(sample, evsel, thread, al)) 1953 return; 1954 1955 if (evswitch__discard(&script->evswitch, evsel)) 1956 return; 1957 1958 ++es->samples; 1959 1960 perf_sample__fprintf_start(script, sample, thread, evsel, 1961 PERF_RECORD_SAMPLE, fp); 1962 1963 if (PRINT_FIELD(PERIOD)) 1964 fprintf(fp, "%10" PRIu64 " ", sample->period); 1965 1966 if (PRINT_FIELD(EVNAME)) { 1967 const char *evname = evsel__name(evsel); 1968 1969 if (!script->name_width) 1970 script->name_width = evlist__max_name_len(script->session->evlist); 1971 1972 fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]"); 1973 } 1974 1975 if (print_flags) 1976 perf_sample__fprintf_flags(sample->flags, fp); 1977 1978 if (is_bts_event(attr)) { 1979 perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp); 1980 return; 1981 } 1982 1983 if (PRINT_FIELD(TRACE) && sample->raw_data) { 1984 event_format__fprintf(evsel->tp_format, sample->cpu, 1985 sample->raw_data, sample->raw_size, fp); 1986 } 1987 1988 if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH)) 1989 perf_sample__fprintf_synth(sample, evsel, fp); 1990 1991 if (PRINT_FIELD(ADDR)) 1992 perf_sample__fprintf_addr(sample, thread, attr, fp); 1993 1994 if (PRINT_FIELD(DATA_SRC)) 1995 data_src__fprintf(sample->data_src, fp); 1996 1997 if (PRINT_FIELD(WEIGHT)) 1998 fprintf(fp, "%16" PRIu64, sample->weight); 1999 2000 if (PRINT_FIELD(IP)) { 2001 struct callchain_cursor *cursor = NULL; 2002 2003 if (script->stitch_lbr) 2004 al->thread->lbr_stitch_enable = true; 2005 2006 if (symbol_conf.use_callchain && sample->callchain && 2007 thread__resolve_callchain(al->thread, &callchain_cursor, evsel, 2008 sample, NULL, NULL, scripting_max_stack) == 0) 2009 cursor = &callchain_cursor; 2010 2011 fputc(cursor ? 
'\n' : ' ', fp); 2012 sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, 2013 symbol_conf.bt_stop_list, fp); 2014 } 2015 2016 if (PRINT_FIELD(IREGS)) 2017 perf_sample__fprintf_iregs(sample, attr, fp); 2018 2019 if (PRINT_FIELD(UREGS)) 2020 perf_sample__fprintf_uregs(sample, attr, fp); 2021 2022 if (PRINT_FIELD(BRSTACK)) 2023 perf_sample__fprintf_brstack(sample, thread, attr, fp); 2024 else if (PRINT_FIELD(BRSTACKSYM)) 2025 perf_sample__fprintf_brstacksym(sample, thread, attr, fp); 2026 else if (PRINT_FIELD(BRSTACKOFF)) 2027 perf_sample__fprintf_brstackoff(sample, thread, attr, fp); 2028 2029 if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT)) 2030 perf_sample__fprintf_bpf_output(sample, fp); 2031 perf_sample__fprintf_insn(sample, attr, thread, machine, fp); 2032 2033 if (PRINT_FIELD(PHYS_ADDR)) 2034 fprintf(fp, "%16" PRIx64, sample->phys_addr); 2035 2036 if (PRINT_FIELD(DATA_PAGE_SIZE)) 2037 fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str)); 2038 2039 perf_sample__fprintf_ipc(sample, attr, fp); 2040 2041 fprintf(fp, "\n"); 2042 2043 if (PRINT_FIELD(SRCCODE)) { 2044 if (map__fprintf_srccode(al->map, al->addr, stdout, 2045 &thread->srccode_state)) 2046 printf("\n"); 2047 } 2048 2049 if (PRINT_FIELD(METRIC)) 2050 perf_sample__fprint_metric(script, thread, evsel, sample, fp); 2051 2052 if (verbose) 2053 fflush(fp); 2054 } 2055 2056 static struct scripting_ops *scripting_ops; 2057 2058 static void __process_stat(struct evsel *counter, u64 tstamp) 2059 { 2060 int nthreads = perf_thread_map__nr(counter->core.threads); 2061 int ncpus = evsel__nr_cpus(counter); 2062 int cpu, thread; 2063 static int header_printed; 2064 2065 if (counter->core.system_wide) 2066 nthreads = 1; 2067 2068 if (!header_printed) { 2069 printf("%3s %8s %15s %15s %15s %15s %s\n", 2070 "CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT"); 2071 header_printed = 1; 2072 } 2073 2074 for (thread = 0; thread < nthreads; thread++) { 2075 for (cpu = 0; cpu < ncpus; cpu++) { 2076 struct perf_counts_values *counts; 2077 2078 counts = perf_counts(counter->counts, cpu, thread); 2079 2080 printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n", 2081 counter->core.cpus->map[cpu], 2082 perf_thread_map__pid(counter->core.threads, thread), 2083 counts->val, 2084 counts->ena, 2085 counts->run, 2086 tstamp, 2087 evsel__name(counter)); 2088 } 2089 } 2090 } 2091 2092 static void process_stat(struct evsel *counter, u64 tstamp) 2093 { 2094 if (scripting_ops && scripting_ops->process_stat) 2095 scripting_ops->process_stat(&stat_config, counter, tstamp); 2096 else 2097 __process_stat(counter, tstamp); 2098 } 2099 2100 static void process_stat_interval(u64 tstamp) 2101 { 2102 if (scripting_ops && scripting_ops->process_stat_interval) 2103 scripting_ops->process_stat_interval(tstamp); 2104 } 2105 2106 static void setup_scripting(void) 2107 { 2108 setup_perl_scripting(); 2109 setup_python_scripting(); 2110 } 2111 2112 static int flush_scripting(void) 2113 { 2114 return scripting_ops ? scripting_ops->flush_script() : 0; 2115 } 2116 2117 static int cleanup_scripting(void) 2118 { 2119 pr_debug("\nperf script stopped\n"); 2120 2121 return scripting_ops ? 
scripting_ops->stop_script() : 0; 2122 } 2123 2124 static bool filter_cpu(struct perf_sample *sample) 2125 { 2126 if (cpu_list && sample->cpu != (u32)-1) 2127 return !test_bit(sample->cpu, cpu_bitmap); 2128 return false; 2129 } 2130 2131 static int process_sample_event(struct perf_tool *tool, 2132 union perf_event *event, 2133 struct perf_sample *sample, 2134 struct evsel *evsel, 2135 struct machine *machine) 2136 { 2137 struct perf_script *scr = container_of(tool, struct perf_script, tool); 2138 struct addr_location al; 2139 2140 if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num, 2141 sample->time)) { 2142 return 0; 2143 } 2144 2145 if (debug_mode) { 2146 if (sample->time < last_timestamp) { 2147 pr_err("Samples misordered, previous: %" PRIu64 2148 " this: %" PRIu64 "\n", last_timestamp, 2149 sample->time); 2150 nr_unordered++; 2151 } 2152 last_timestamp = sample->time; 2153 return 0; 2154 } 2155 2156 if (machine__resolve(machine, &al, sample) < 0) { 2157 pr_err("problem processing %d event, skipping it.\n", 2158 event->header.type); 2159 return -1; 2160 } 2161 2162 if (al.filtered) 2163 goto out_put; 2164 2165 if (filter_cpu(sample)) 2166 goto out_put; 2167 2168 if (scripting_ops) 2169 scripting_ops->process_event(event, sample, evsel, &al); 2170 else 2171 process_event(scr, sample, evsel, &al, machine); 2172 2173 out_put: 2174 addr_location__put(&al); 2175 return 0; 2176 } 2177 2178 static int process_attr(struct perf_tool *tool, union perf_event *event, 2179 struct evlist **pevlist) 2180 { 2181 struct perf_script *scr = container_of(tool, struct perf_script, tool); 2182 struct evlist *evlist; 2183 struct evsel *evsel, *pos; 2184 u64 sample_type; 2185 int err; 2186 static struct evsel_script *es; 2187 2188 err = perf_event__process_attr(tool, event, pevlist); 2189 if (err) 2190 return err; 2191 2192 evlist = *pevlist; 2193 evsel = evlist__last(*pevlist); 2194 2195 if (!evsel->priv) { 2196 if (scr->per_event_dump) { 2197 evsel->priv = perf_evsel_script__new(evsel, 2198 scr->session->data); 2199 } else { 2200 es = zalloc(sizeof(*es)); 2201 if (!es) 2202 return -ENOMEM; 2203 es->fp = stdout; 2204 evsel->priv = es; 2205 } 2206 } 2207 2208 if (evsel->core.attr.type >= PERF_TYPE_MAX && 2209 evsel->core.attr.type != PERF_TYPE_SYNTH) 2210 return 0; 2211 2212 evlist__for_each_entry(evlist, pos) { 2213 if (pos->core.attr.type == evsel->core.attr.type && pos != evsel) 2214 return 0; 2215 } 2216 2217 if (evsel->core.attr.sample_type) { 2218 err = evsel__check_attr(evsel, scr->session); 2219 if (err) 2220 return err; 2221 } 2222 2223 /* 2224 * Check if we need to enable callchains based 2225 * on events sample_type. 
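 *
 * A few illustrative examples (typical 'perf record' usage, not an
 * exhaustive list): 'perf record -g' normally sets PERF_SAMPLE_CALLCHAIN,
 * '--call-graph dwarf' sets PERF_SAMPLE_REGS_USER plus PERF_SAMPLE_STACK_USER,
 * and '--call-graph lbr' sets PERF_SAMPLE_BRANCH_STACK; any of these makes
 * the ip/sym fields below get enabled unless the user explicitly unset them.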
2226 */ 2227 sample_type = evlist__combined_sample_type(evlist); 2228 callchain_param_setup(sample_type); 2229 2230 /* Enable fields for callchain entries */ 2231 if (symbol_conf.use_callchain && 2232 (sample_type & PERF_SAMPLE_CALLCHAIN || 2233 sample_type & PERF_SAMPLE_BRANCH_STACK || 2234 (sample_type & PERF_SAMPLE_REGS_USER && 2235 sample_type & PERF_SAMPLE_STACK_USER))) { 2236 int type = output_type(evsel->core.attr.type); 2237 2238 if (!(output[type].user_unset_fields & PERF_OUTPUT_IP)) 2239 output[type].fields |= PERF_OUTPUT_IP; 2240 if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM)) 2241 output[type].fields |= PERF_OUTPUT_SYM; 2242 } 2243 set_print_ip_opts(&evsel->core.attr); 2244 return 0; 2245 } 2246 2247 static int print_event_with_time(struct perf_tool *tool, 2248 union perf_event *event, 2249 struct perf_sample *sample, 2250 struct machine *machine, 2251 pid_t pid, pid_t tid, u64 timestamp) 2252 { 2253 struct perf_script *script = container_of(tool, struct perf_script, tool); 2254 struct perf_session *session = script->session; 2255 struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id); 2256 struct thread *thread = NULL; 2257 2258 if (evsel && !evsel->core.attr.sample_id_all) { 2259 sample->cpu = 0; 2260 sample->time = timestamp; 2261 sample->pid = pid; 2262 sample->tid = tid; 2263 } 2264 2265 if (filter_cpu(sample)) 2266 return 0; 2267 2268 if (tid != -1) 2269 thread = machine__findnew_thread(machine, pid, tid); 2270 2271 if (evsel) { 2272 perf_sample__fprintf_start(script, sample, thread, evsel, 2273 event->header.type, stdout); 2274 } 2275 2276 perf_event__fprintf(event, machine, stdout); 2277 2278 thread__put(thread); 2279 2280 return 0; 2281 } 2282 2283 static int print_event(struct perf_tool *tool, union perf_event *event, 2284 struct perf_sample *sample, struct machine *machine, 2285 pid_t pid, pid_t tid) 2286 { 2287 return print_event_with_time(tool, event, sample, machine, pid, tid, 0); 2288 } 2289 2290 static int process_comm_event(struct perf_tool *tool, 2291 union perf_event *event, 2292 struct perf_sample *sample, 2293 struct machine *machine) 2294 { 2295 if (perf_event__process_comm(tool, event, sample, machine) < 0) 2296 return -1; 2297 2298 return print_event(tool, event, sample, machine, event->comm.pid, 2299 event->comm.tid); 2300 } 2301 2302 static int process_namespaces_event(struct perf_tool *tool, 2303 union perf_event *event, 2304 struct perf_sample *sample, 2305 struct machine *machine) 2306 { 2307 if (perf_event__process_namespaces(tool, event, sample, machine) < 0) 2308 return -1; 2309 2310 return print_event(tool, event, sample, machine, event->namespaces.pid, 2311 event->namespaces.tid); 2312 } 2313 2314 static int process_cgroup_event(struct perf_tool *tool, 2315 union perf_event *event, 2316 struct perf_sample *sample, 2317 struct machine *machine) 2318 { 2319 if (perf_event__process_cgroup(tool, event, sample, machine) < 0) 2320 return -1; 2321 2322 return print_event(tool, event, sample, machine, sample->pid, 2323 sample->tid); 2324 } 2325 2326 static int process_fork_event(struct perf_tool *tool, 2327 union perf_event *event, 2328 struct perf_sample *sample, 2329 struct machine *machine) 2330 { 2331 if (perf_event__process_fork(tool, event, sample, machine) < 0) 2332 return -1; 2333 2334 return print_event_with_time(tool, event, sample, machine, 2335 event->fork.pid, event->fork.tid, 2336 event->fork.time); 2337 } 2338 static int process_exit_event(struct perf_tool *tool, 2339 union perf_event *event, 2340 struct perf_sample 
*sample, 2341 struct machine *machine) 2342 { 2343 /* Print before 'exit' deletes anything */ 2344 if (print_event_with_time(tool, event, sample, machine, event->fork.pid, 2345 event->fork.tid, event->fork.time)) 2346 return -1; 2347 2348 return perf_event__process_exit(tool, event, sample, machine); 2349 } 2350 2351 static int process_mmap_event(struct perf_tool *tool, 2352 union perf_event *event, 2353 struct perf_sample *sample, 2354 struct machine *machine) 2355 { 2356 if (perf_event__process_mmap(tool, event, sample, machine) < 0) 2357 return -1; 2358 2359 return print_event(tool, event, sample, machine, event->mmap.pid, 2360 event->mmap.tid); 2361 } 2362 2363 static int process_mmap2_event(struct perf_tool *tool, 2364 union perf_event *event, 2365 struct perf_sample *sample, 2366 struct machine *machine) 2367 { 2368 if (perf_event__process_mmap2(tool, event, sample, machine) < 0) 2369 return -1; 2370 2371 return print_event(tool, event, sample, machine, event->mmap2.pid, 2372 event->mmap2.tid); 2373 } 2374 2375 static int process_switch_event(struct perf_tool *tool, 2376 union perf_event *event, 2377 struct perf_sample *sample, 2378 struct machine *machine) 2379 { 2380 struct perf_script *script = container_of(tool, struct perf_script, tool); 2381 2382 if (perf_event__process_switch(tool, event, sample, machine) < 0) 2383 return -1; 2384 2385 if (scripting_ops && scripting_ops->process_switch) 2386 scripting_ops->process_switch(event, sample, machine); 2387 2388 if (!script->show_switch_events) 2389 return 0; 2390 2391 return print_event(tool, event, sample, machine, sample->pid, 2392 sample->tid); 2393 } 2394 2395 static int 2396 process_lost_event(struct perf_tool *tool, 2397 union perf_event *event, 2398 struct perf_sample *sample, 2399 struct machine *machine) 2400 { 2401 return print_event(tool, event, sample, machine, sample->pid, 2402 sample->tid); 2403 } 2404 2405 static int 2406 process_finished_round_event(struct perf_tool *tool __maybe_unused, 2407 union perf_event *event, 2408 struct ordered_events *oe __maybe_unused) 2409 2410 { 2411 perf_event__fprintf(event, NULL, stdout); 2412 return 0; 2413 } 2414 2415 static int 2416 process_bpf_events(struct perf_tool *tool __maybe_unused, 2417 union perf_event *event, 2418 struct perf_sample *sample, 2419 struct machine *machine) 2420 { 2421 if (machine__process_ksymbol(machine, event, sample) < 0) 2422 return -1; 2423 2424 return print_event(tool, event, sample, machine, sample->pid, 2425 sample->tid); 2426 } 2427 2428 static int process_text_poke_events(struct perf_tool *tool, 2429 union perf_event *event, 2430 struct perf_sample *sample, 2431 struct machine *machine) 2432 { 2433 if (perf_event__process_text_poke(tool, event, sample, machine) < 0) 2434 return -1; 2435 2436 return print_event(tool, event, sample, machine, sample->pid, 2437 sample->tid); 2438 } 2439 2440 static void sig_handler(int sig __maybe_unused) 2441 { 2442 session_done = 1; 2443 } 2444 2445 static void perf_script__fclose_per_event_dump(struct perf_script *script) 2446 { 2447 struct evlist *evlist = script->session->evlist; 2448 struct evsel *evsel; 2449 2450 evlist__for_each_entry(evlist, evsel) { 2451 if (!evsel->priv) 2452 break; 2453 perf_evsel_script__delete(evsel->priv); 2454 evsel->priv = NULL; 2455 } 2456 } 2457 2458 static int perf_script__fopen_per_event_dump(struct perf_script *script) 2459 { 2460 struct evsel *evsel; 2461 2462 evlist__for_each_entry(script->session->evlist, evsel) { 2463 /* 2464 * Already setup? I.e. 
we may be called twice in cases like 2465 * Intel PT, one for the intel_pt// and dummy events, then 2466 * for the evsels synthesized from the auxtrace info. 2467 * 2468 * See perf_script__process_auxtrace_info. 2469 */ 2470 if (evsel->priv != NULL) 2471 continue; 2472 2473 evsel->priv = perf_evsel_script__new(evsel, script->session->data); 2474 if (evsel->priv == NULL) 2475 goto out_err_fclose; 2476 } 2477 2478 return 0; 2479 2480 out_err_fclose: 2481 perf_script__fclose_per_event_dump(script); 2482 return -1; 2483 } 2484 2485 static int perf_script__setup_per_event_dump(struct perf_script *script) 2486 { 2487 struct evsel *evsel; 2488 static struct evsel_script es_stdout; 2489 2490 if (script->per_event_dump) 2491 return perf_script__fopen_per_event_dump(script); 2492 2493 es_stdout.fp = stdout; 2494 2495 evlist__for_each_entry(script->session->evlist, evsel) 2496 evsel->priv = &es_stdout; 2497 2498 return 0; 2499 } 2500 2501 static void perf_script__exit_per_event_dump_stats(struct perf_script *script) 2502 { 2503 struct evsel *evsel; 2504 2505 evlist__for_each_entry(script->session->evlist, evsel) { 2506 struct evsel_script *es = evsel->priv; 2507 2508 perf_evsel_script__fprintf(es, stdout); 2509 perf_evsel_script__delete(es); 2510 evsel->priv = NULL; 2511 } 2512 } 2513 2514 static int __cmd_script(struct perf_script *script) 2515 { 2516 int ret; 2517 2518 signal(SIGINT, sig_handler); 2519 2520 perf_stat__init_shadow_stats(); 2521 2522 /* override event processing functions */ 2523 if (script->show_task_events) { 2524 script->tool.comm = process_comm_event; 2525 script->tool.fork = process_fork_event; 2526 script->tool.exit = process_exit_event; 2527 } 2528 if (script->show_mmap_events) { 2529 script->tool.mmap = process_mmap_event; 2530 script->tool.mmap2 = process_mmap2_event; 2531 } 2532 if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch)) 2533 script->tool.context_switch = process_switch_event; 2534 if (script->show_namespace_events) 2535 script->tool.namespaces = process_namespaces_event; 2536 if (script->show_cgroup_events) 2537 script->tool.cgroup = process_cgroup_event; 2538 if (script->show_lost_events) 2539 script->tool.lost = process_lost_event; 2540 if (script->show_round_events) { 2541 script->tool.ordered_events = false; 2542 script->tool.finished_round = process_finished_round_event; 2543 } 2544 if (script->show_bpf_events) { 2545 script->tool.ksymbol = process_bpf_events; 2546 script->tool.bpf = process_bpf_events; 2547 } 2548 if (script->show_text_poke_events) { 2549 script->tool.ksymbol = process_bpf_events; 2550 script->tool.text_poke = process_text_poke_events; 2551 } 2552 2553 if (perf_script__setup_per_event_dump(script)) { 2554 pr_err("Couldn't create the per event dump files\n"); 2555 return -1; 2556 } 2557 2558 ret = perf_session__process_events(script->session); 2559 2560 if (script->per_event_dump) 2561 perf_script__exit_per_event_dump_stats(script); 2562 2563 if (debug_mode) 2564 pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); 2565 2566 return ret; 2567 } 2568 2569 struct script_spec { 2570 struct list_head node; 2571 struct scripting_ops *ops; 2572 char spec[]; 2573 }; 2574 2575 static LIST_HEAD(script_specs); 2576 2577 static struct script_spec *script_spec__new(const char *spec, 2578 struct scripting_ops *ops) 2579 { 2580 struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1); 2581 2582 if (s != NULL) { 2583 strcpy(s->spec, spec); 2584 s->ops = ops; 2585 } 2586 2587 return s; 2588 } 2589 2590 static void
script_spec__add(struct script_spec *s) 2591 { 2592 list_add_tail(&s->node, &script_specs); 2593 } 2594 2595 static struct script_spec *script_spec__find(const char *spec) 2596 { 2597 struct script_spec *s; 2598 2599 list_for_each_entry(s, &script_specs, node) 2600 if (strcasecmp(s->spec, spec) == 0) 2601 return s; 2602 return NULL; 2603 } 2604 2605 int script_spec_register(const char *spec, struct scripting_ops *ops) 2606 { 2607 struct script_spec *s; 2608 2609 s = script_spec__find(spec); 2610 if (s) 2611 return -1; 2612 2613 s = script_spec__new(spec, ops); 2614 if (!s) 2615 return -1; 2616 else 2617 script_spec__add(s); 2618 2619 return 0; 2620 } 2621 2622 static struct scripting_ops *script_spec__lookup(const char *spec) 2623 { 2624 struct script_spec *s = script_spec__find(spec); 2625 if (!s) 2626 return NULL; 2627 2628 return s->ops; 2629 } 2630 2631 static void list_available_languages(void) 2632 { 2633 struct script_spec *s; 2634 2635 fprintf(stderr, "\n"); 2636 fprintf(stderr, "Scripting language extensions (used in " 2637 "perf script -s [spec:]script.[spec]):\n\n"); 2638 2639 list_for_each_entry(s, &script_specs, node) 2640 fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name); 2641 2642 fprintf(stderr, "\n"); 2643 } 2644 2645 static int parse_scriptname(const struct option *opt __maybe_unused, 2646 const char *str, int unset __maybe_unused) 2647 { 2648 char spec[PATH_MAX]; 2649 const char *script, *ext; 2650 int len; 2651 2652 if (strcmp(str, "lang") == 0) { 2653 list_available_languages(); 2654 exit(0); 2655 } 2656 2657 script = strchr(str, ':'); 2658 if (script) { 2659 len = script - str; 2660 if (len >= PATH_MAX) { 2661 fprintf(stderr, "invalid language specifier"); 2662 return -1; 2663 } 2664 strncpy(spec, str, len); 2665 spec[len] = '\0'; 2666 scripting_ops = script_spec__lookup(spec); 2667 if (!scripting_ops) { 2668 fprintf(stderr, "invalid language specifier"); 2669 return -1; 2670 } 2671 script++; 2672 } else { 2673 script = str; 2674 ext = strrchr(script, '.'); 2675 if (!ext) { 2676 fprintf(stderr, "invalid script extension"); 2677 return -1; 2678 } 2679 scripting_ops = script_spec__lookup(++ext); 2680 if (!scripting_ops) { 2681 fprintf(stderr, "invalid script extension"); 2682 return -1; 2683 } 2684 } 2685 2686 script_name = strdup(script); 2687 2688 return 0; 2689 } 2690 2691 static int parse_output_fields(const struct option *opt __maybe_unused, 2692 const char *arg, int unset __maybe_unused) 2693 { 2694 char *tok, *strtok_saveptr = NULL; 2695 int i, imax = ARRAY_SIZE(all_output_options); 2696 int j; 2697 int rc = 0; 2698 char *str = strdup(arg); 2699 int type = -1; 2700 enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT; 2701 2702 if (!str) 2703 return -ENOMEM; 2704 2705 /* first word can state for which event type the user is specifying 2706 * the fields. If no type exists, the specified fields apply to all 2707 * event types found in the file minus the invalid fields for a type. 
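 *
 * A few illustrative examples of strings this parser accepts, as passed
 * via -F/--fields (assumed typical uses, see the option help for the
 * full field list):
 *   sw:comm,tid,time,ip,sym   fields for software events only
 *   comm,tid,time,period      the same fields for every event type
 *   +srcline,-cpu             add/remove fields on top of the defaults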
2708 */ 2709 tok = strchr(str, ':'); 2710 if (tok) { 2711 *tok = '\0'; 2712 tok++; 2713 if (!strcmp(str, "hw")) 2714 type = PERF_TYPE_HARDWARE; 2715 else if (!strcmp(str, "sw")) 2716 type = PERF_TYPE_SOFTWARE; 2717 else if (!strcmp(str, "trace")) 2718 type = PERF_TYPE_TRACEPOINT; 2719 else if (!strcmp(str, "raw")) 2720 type = PERF_TYPE_RAW; 2721 else if (!strcmp(str, "break")) 2722 type = PERF_TYPE_BREAKPOINT; 2723 else if (!strcmp(str, "synth")) 2724 type = OUTPUT_TYPE_SYNTH; 2725 else { 2726 fprintf(stderr, "Invalid event type in field string.\n"); 2727 rc = -EINVAL; 2728 goto out; 2729 } 2730 2731 if (output[type].user_set) 2732 pr_warning("Overriding previous field request for %s events.\n", 2733 event_type(type)); 2734 2735 /* Don't override defaults for +- */ 2736 if (strchr(tok, '+') || strchr(tok, '-')) 2737 goto parse; 2738 2739 output[type].fields = 0; 2740 output[type].user_set = true; 2741 output[type].wildcard_set = false; 2742 2743 } else { 2744 tok = str; 2745 if (strlen(str) == 0) { 2746 fprintf(stderr, 2747 "Cannot set fields to 'none' for all event types.\n"); 2748 rc = -EINVAL; 2749 goto out; 2750 } 2751 2752 /* Don't override defaults for +- */ 2753 if (strchr(str, '+') || strchr(str, '-')) 2754 goto parse; 2755 2756 if (output_set_by_user()) 2757 pr_warning("Overriding previous field request for all events.\n"); 2758 2759 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) { 2760 output[j].fields = 0; 2761 output[j].user_set = true; 2762 output[j].wildcard_set = true; 2763 } 2764 } 2765 2766 parse: 2767 for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) { 2768 if (*tok == '+') { 2769 if (change == SET) 2770 goto out_badmix; 2771 change = ADD; 2772 tok++; 2773 } else if (*tok == '-') { 2774 if (change == SET) 2775 goto out_badmix; 2776 change = REMOVE; 2777 tok++; 2778 } else { 2779 if (change != SET && change != DEFAULT) 2780 goto out_badmix; 2781 change = SET; 2782 } 2783 2784 for (i = 0; i < imax; ++i) { 2785 if (strcmp(tok, all_output_options[i].str) == 0) 2786 break; 2787 } 2788 if (i == imax && strcmp(tok, "flags") == 0) { 2789 print_flags = change == REMOVE ? false : true; 2790 continue; 2791 } 2792 if (i == imax) { 2793 fprintf(stderr, "Invalid field requested.\n"); 2794 rc = -EINVAL; 2795 goto out; 2796 } 2797 2798 if (type == -1) { 2799 /* add user option to all events types for 2800 * which it is valid 2801 */ 2802 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) { 2803 if (output[j].invalid_fields & all_output_options[i].field) { 2804 pr_warning("\'%s\' not valid for %s events. 
Ignoring.\n", 2805 all_output_options[i].str, event_type(j)); 2806 } else { 2807 if (change == REMOVE) { 2808 output[j].fields &= ~all_output_options[i].field; 2809 output[j].user_set_fields &= ~all_output_options[i].field; 2810 output[j].user_unset_fields |= all_output_options[i].field; 2811 } else { 2812 output[j].fields |= all_output_options[i].field; 2813 output[j].user_set_fields |= all_output_options[i].field; 2814 output[j].user_unset_fields &= ~all_output_options[i].field; 2815 } 2816 output[j].user_set = true; 2817 output[j].wildcard_set = true; 2818 } 2819 } 2820 } else { 2821 if (output[type].invalid_fields & all_output_options[i].field) { 2822 fprintf(stderr, "\'%s\' not valid for %s events.\n", 2823 all_output_options[i].str, event_type(type)); 2824 2825 rc = -EINVAL; 2826 goto out; 2827 } 2828 if (change == REMOVE) 2829 output[type].fields &= ~all_output_options[i].field; 2830 else 2831 output[type].fields |= all_output_options[i].field; 2832 output[type].user_set = true; 2833 output[type].wildcard_set = true; 2834 } 2835 } 2836 2837 if (type >= 0) { 2838 if (output[type].fields == 0) { 2839 pr_debug("No fields requested for %s type. " 2840 "Events will not be displayed.\n", event_type(type)); 2841 } 2842 } 2843 goto out; 2844 2845 out_badmix: 2846 fprintf(stderr, "Cannot mix +-field with overridden fields\n"); 2847 rc = -EINVAL; 2848 out: 2849 free(str); 2850 return rc; 2851 } 2852 2853 #define for_each_lang(scripts_path, scripts_dir, lang_dirent) \ 2854 while ((lang_dirent = readdir(scripts_dir)) != NULL) \ 2855 if ((lang_dirent->d_type == DT_DIR || \ 2856 (lang_dirent->d_type == DT_UNKNOWN && \ 2857 is_directory(scripts_path, lang_dirent))) && \ 2858 (strcmp(lang_dirent->d_name, ".")) && \ 2859 (strcmp(lang_dirent->d_name, ".."))) 2860 2861 #define for_each_script(lang_path, lang_dir, script_dirent) \ 2862 while ((script_dirent = readdir(lang_dir)) != NULL) \ 2863 if (script_dirent->d_type != DT_DIR && \ 2864 (script_dirent->d_type != DT_UNKNOWN || \ 2865 !is_directory(lang_path, script_dirent))) 2866 2867 2868 #define RECORD_SUFFIX "-record" 2869 #define REPORT_SUFFIX "-report" 2870 2871 struct script_desc { 2872 struct list_head node; 2873 char *name; 2874 char *half_liner; 2875 char *args; 2876 }; 2877 2878 static LIST_HEAD(script_descs); 2879 2880 static struct script_desc *script_desc__new(const char *name) 2881 { 2882 struct script_desc *s = zalloc(sizeof(*s)); 2883 2884 if (s != NULL && name) 2885 s->name = strdup(name); 2886 2887 return s; 2888 } 2889 2890 static void script_desc__delete(struct script_desc *s) 2891 { 2892 zfree(&s->name); 2893 zfree(&s->half_liner); 2894 zfree(&s->args); 2895 free(s); 2896 } 2897 2898 static void script_desc__add(struct script_desc *s) 2899 { 2900 list_add_tail(&s->node, &script_descs); 2901 } 2902 2903 static struct script_desc *script_desc__find(const char *name) 2904 { 2905 struct script_desc *s; 2906 2907 list_for_each_entry(s, &script_descs, node) 2908 if (strcasecmp(s->name, name) == 0) 2909 return s; 2910 return NULL; 2911 } 2912 2913 static struct script_desc *script_desc__findnew(const char *name) 2914 { 2915 struct script_desc *s = script_desc__find(name); 2916 2917 if (s) 2918 return s; 2919 2920 s = script_desc__new(name); 2921 if (!s) 2922 return NULL; 2923 2924 script_desc__add(s); 2925 2926 return s; 2927 } 2928 2929 static const char *ends_with(const char *str, const char *suffix) 2930 { 2931 size_t suffix_len = strlen(suffix); 2932 const char *p = str; 2933 2934 if (strlen(str) > suffix_len) { 2935 p = str + 
strlen(str) - suffix_len; 2936 if (!strncmp(p, suffix, suffix_len)) 2937 return p; 2938 } 2939 2940 return NULL; 2941 } 2942 2943 static int read_script_info(struct script_desc *desc, const char *filename) 2944 { 2945 char line[BUFSIZ], *p; 2946 FILE *fp; 2947 2948 fp = fopen(filename, "r"); 2949 if (!fp) 2950 return -1; 2951 2952 while (fgets(line, sizeof(line), fp)) { 2953 p = skip_spaces(line); 2954 if (strlen(p) == 0) 2955 continue; 2956 if (*p != '#') 2957 continue; 2958 p++; 2959 if (strlen(p) && *p == '!') 2960 continue; 2961 2962 p = skip_spaces(p); 2963 if (strlen(p) && p[strlen(p) - 1] == '\n') 2964 p[strlen(p) - 1] = '\0'; 2965 2966 if (!strncmp(p, "description:", strlen("description:"))) { 2967 p += strlen("description:"); 2968 desc->half_liner = strdup(skip_spaces(p)); 2969 continue; 2970 } 2971 2972 if (!strncmp(p, "args:", strlen("args:"))) { 2973 p += strlen("args:"); 2974 desc->args = strdup(skip_spaces(p)); 2975 continue; 2976 } 2977 } 2978 2979 fclose(fp); 2980 2981 return 0; 2982 } 2983 2984 static char *get_script_root(struct dirent *script_dirent, const char *suffix) 2985 { 2986 char *script_root, *str; 2987 2988 script_root = strdup(script_dirent->d_name); 2989 if (!script_root) 2990 return NULL; 2991 2992 str = (char *)ends_with(script_root, suffix); 2993 if (!str) { 2994 free(script_root); 2995 return NULL; 2996 } 2997 2998 *str = '\0'; 2999 return script_root; 3000 } 3001 3002 static int list_available_scripts(const struct option *opt __maybe_unused, 3003 const char *s __maybe_unused, 3004 int unset __maybe_unused) 3005 { 3006 struct dirent *script_dirent, *lang_dirent; 3007 char scripts_path[MAXPATHLEN]; 3008 DIR *scripts_dir, *lang_dir; 3009 char script_path[MAXPATHLEN]; 3010 char lang_path[MAXPATHLEN]; 3011 struct script_desc *desc; 3012 char first_half[BUFSIZ]; 3013 char *script_root; 3014 3015 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path()); 3016 3017 scripts_dir = opendir(scripts_path); 3018 if (!scripts_dir) { 3019 fprintf(stdout, 3020 "open(%s) failed.\n" 3021 "Check \"PERF_EXEC_PATH\" env to set scripts dir.\n", 3022 scripts_path); 3023 exit(-1); 3024 } 3025 3026 for_each_lang(scripts_path, scripts_dir, lang_dirent) { 3027 scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 3028 lang_dirent->d_name); 3029 lang_dir = opendir(lang_path); 3030 if (!lang_dir) 3031 continue; 3032 3033 for_each_script(lang_path, lang_dir, script_dirent) { 3034 script_root = get_script_root(script_dirent, REPORT_SUFFIX); 3035 if (script_root) { 3036 desc = script_desc__findnew(script_root); 3037 scnprintf(script_path, MAXPATHLEN, "%s/%s", 3038 lang_path, script_dirent->d_name); 3039 read_script_info(desc, script_path); 3040 free(script_root); 3041 } 3042 } 3043 } 3044 3045 fprintf(stdout, "List of available trace scripts:\n"); 3046 list_for_each_entry(desc, &script_descs, node) { 3047 sprintf(first_half, "%s %s", desc->name, 3048 desc->args ? desc->args : ""); 3049 fprintf(stdout, " %-36s %s\n", first_half, 3050 desc->half_liner ? desc->half_liner : ""); 3051 } 3052 3053 exit(0); 3054 } 3055 3056 /* 3057 * Some scripts specify the required events in their "xxx-record" file; 3058 * this function checks whether the events in perf.data match those 3059 * mentioned in the "xxx-record". 3060 * 3061 * Fixme: All existing "xxx-record" files use the simple "-e event" format, 3062 * which is covered well now. New parsing code should be added to 3063 * cover more complex future formats like event groups etc.
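 *
 * e.g. an "xxx-record" shell script is assumed to boil down to something
 * like:
 *   perf record -e raw_syscalls:sys_enter $@
 * so scanning each line for "-e <event>" tokens is sufficient for now.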
3064 */ 3065 static int check_ev_match(char *dir_name, char *scriptname, 3066 struct perf_session *session) 3067 { 3068 char filename[MAXPATHLEN], evname[128]; 3069 char line[BUFSIZ], *p; 3070 struct evsel *pos; 3071 int match, len; 3072 FILE *fp; 3073 3074 scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname); 3075 3076 fp = fopen(filename, "r"); 3077 if (!fp) 3078 return -1; 3079 3080 while (fgets(line, sizeof(line), fp)) { 3081 p = skip_spaces(line); 3082 if (*p == '#') 3083 continue; 3084 3085 while (strlen(p)) { 3086 p = strstr(p, "-e"); 3087 if (!p) 3088 break; 3089 3090 p += 2; 3091 p = skip_spaces(p); 3092 len = strcspn(p, " \t"); 3093 if (!len) 3094 break; 3095 3096 snprintf(evname, len + 1, "%s", p); 3097 3098 match = 0; 3099 evlist__for_each_entry(session->evlist, pos) { 3100 if (!strcmp(evsel__name(pos), evname)) { 3101 match = 1; 3102 break; 3103 } 3104 } 3105 3106 if (!match) { 3107 fclose(fp); 3108 return -1; 3109 } 3110 } 3111 } 3112 3113 fclose(fp); 3114 return 0; 3115 } 3116 3117 /* 3118 * Return -1 if none is found, otherwise the actual scripts number. 3119 * 3120 * Currently the only user of this function is the script browser, which 3121 * will list all statically runnable scripts, select one, execute it and 3122 * show the output in a perf browser. 3123 */ 3124 int find_scripts(char **scripts_array, char **scripts_path_array, int num, 3125 int pathlen) 3126 { 3127 struct dirent *script_dirent, *lang_dirent; 3128 char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; 3129 DIR *scripts_dir, *lang_dir; 3130 struct perf_session *session; 3131 struct perf_data data = { 3132 .path = input_name, 3133 .mode = PERF_DATA_MODE_READ, 3134 }; 3135 char *temp; 3136 int i = 0; 3137 3138 session = perf_session__new(&data, false, NULL); 3139 if (IS_ERR(session)) 3140 return PTR_ERR(session); 3141 3142 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path()); 3143 3144 scripts_dir = opendir(scripts_path); 3145 if (!scripts_dir) { 3146 perf_session__delete(session); 3147 return -1; 3148 } 3149 3150 for_each_lang(scripts_path, scripts_dir, lang_dirent) { 3151 scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, 3152 lang_dirent->d_name); 3153 #ifndef HAVE_LIBPERL_SUPPORT 3154 if (strstr(lang_path, "perl")) 3155 continue; 3156 #endif 3157 #ifndef HAVE_LIBPYTHON_SUPPORT 3158 if (strstr(lang_path, "python")) 3159 continue; 3160 #endif 3161 3162 lang_dir = opendir(lang_path); 3163 if (!lang_dir) 3164 continue; 3165 3166 for_each_script(lang_path, lang_dir, script_dirent) { 3167 /* Skip those real time scripts: xxxtop.p[yl] */ 3168 if (strstr(script_dirent->d_name, "top.")) 3169 continue; 3170 if (i >= num) 3171 break; 3172 snprintf(scripts_path_array[i], pathlen, "%s/%s", 3173 lang_path, 3174 script_dirent->d_name); 3175 temp = strchr(script_dirent->d_name, '.'); 3176 snprintf(scripts_array[i], 3177 (temp - script_dirent->d_name) + 1, 3178 "%s", script_dirent->d_name); 3179 3180 if (check_ev_match(lang_path, 3181 scripts_array[i], session)) 3182 continue; 3183 3184 i++; 3185 } 3186 closedir(lang_dir); 3187 } 3188 3189 closedir(scripts_dir); 3190 perf_session__delete(session); 3191 return i; 3192 } 3193 3194 static char *get_script_path(const char *script_root, const char *suffix) 3195 { 3196 struct dirent *script_dirent, *lang_dirent; 3197 char scripts_path[MAXPATHLEN]; 3198 char script_path[MAXPATHLEN]; 3199 DIR *scripts_dir, *lang_dir; 3200 char lang_path[MAXPATHLEN]; 3201 char *__script_root; 3202 3203 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", 
get_argv_exec_path()); 3204 3205 scripts_dir = opendir(scripts_path); 3206 if (!scripts_dir) 3207 return NULL; 3208 3209 for_each_lang(scripts_path, scripts_dir, lang_dirent) { 3210 scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 3211 lang_dirent->d_name); 3212 lang_dir = opendir(lang_path); 3213 if (!lang_dir) 3214 continue; 3215 3216 for_each_script(lang_path, lang_dir, script_dirent) { 3217 __script_root = get_script_root(script_dirent, suffix); 3218 if (__script_root && !strcmp(script_root, __script_root)) { 3219 free(__script_root); 3220 closedir(scripts_dir); 3221 scnprintf(script_path, MAXPATHLEN, "%s/%s", 3222 lang_path, script_dirent->d_name); 3223 closedir(lang_dir); 3224 return strdup(script_path); 3225 } 3226 free(__script_root); 3227 } 3228 closedir(lang_dir); 3229 } 3230 closedir(scripts_dir); 3231 3232 return NULL; 3233 } 3234 3235 static bool is_top_script(const char *script_path) 3236 { 3237 return ends_with(script_path, "top") == NULL ? false : true; 3238 } 3239 3240 static int has_required_arg(char *script_path) 3241 { 3242 struct script_desc *desc; 3243 int n_args = 0; 3244 char *p; 3245 3246 desc = script_desc__new(NULL); 3247 3248 if (read_script_info(desc, script_path)) 3249 goto out; 3250 3251 if (!desc->args) 3252 goto out; 3253 3254 for (p = desc->args; *p; p++) 3255 if (*p == '<') 3256 n_args++; 3257 out: 3258 script_desc__delete(desc); 3259 3260 return n_args; 3261 } 3262 3263 static int have_cmd(int argc, const char **argv) 3264 { 3265 char **__argv = malloc(sizeof(const char *) * argc); 3266 3267 if (!__argv) { 3268 pr_err("malloc failed\n"); 3269 return -1; 3270 } 3271 3272 memcpy(__argv, argv, sizeof(const char *) * argc); 3273 argc = parse_options(argc, (const char **)__argv, record_options, 3274 NULL, PARSE_OPT_STOP_AT_NON_OPTION); 3275 free(__argv); 3276 3277 system_wide = (argc == 0); 3278 3279 return 0; 3280 } 3281 3282 static void script__setup_sample_type(struct perf_script *script) 3283 { 3284 struct perf_session *session = script->session; 3285 u64 sample_type = evlist__combined_sample_type(session->evlist); 3286 3287 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) { 3288 if ((sample_type & PERF_SAMPLE_REGS_USER) && 3289 (sample_type & PERF_SAMPLE_STACK_USER)) { 3290 callchain_param.record_mode = CALLCHAIN_DWARF; 3291 dwarf_callchain_users = true; 3292 } else if (sample_type & PERF_SAMPLE_BRANCH_STACK) 3293 callchain_param.record_mode = CALLCHAIN_LBR; 3294 else 3295 callchain_param.record_mode = CALLCHAIN_FP; 3296 } 3297 3298 if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) { 3299 pr_warning("Can't find LBR callchain. 
Switch off --stitch-lbr.\n" 3300 "Please apply --call-graph lbr when recording.\n"); 3301 script->stitch_lbr = false; 3302 } 3303 } 3304 3305 static int process_stat_round_event(struct perf_session *session, 3306 union perf_event *event) 3307 { 3308 struct perf_record_stat_round *round = &event->stat_round; 3309 struct evsel *counter; 3310 3311 evlist__for_each_entry(session->evlist, counter) { 3312 perf_stat_process_counter(&stat_config, counter); 3313 process_stat(counter, round->time); 3314 } 3315 3316 process_stat_interval(round->time); 3317 return 0; 3318 } 3319 3320 static int process_stat_config_event(struct perf_session *session __maybe_unused, 3321 union perf_event *event) 3322 { 3323 perf_event__read_stat_config(&stat_config, &event->stat_config); 3324 return 0; 3325 } 3326 3327 static int set_maps(struct perf_script *script) 3328 { 3329 struct evlist *evlist = script->session->evlist; 3330 3331 if (!script->cpus || !script->threads) 3332 return 0; 3333 3334 if (WARN_ONCE(script->allocated, "stats double allocation\n")) 3335 return -EINVAL; 3336 3337 perf_evlist__set_maps(&evlist->core, script->cpus, script->threads); 3338 3339 if (evlist__alloc_stats(evlist, true)) 3340 return -ENOMEM; 3341 3342 script->allocated = true; 3343 return 0; 3344 } 3345 3346 static 3347 int process_thread_map_event(struct perf_session *session, 3348 union perf_event *event) 3349 { 3350 struct perf_tool *tool = session->tool; 3351 struct perf_script *script = container_of(tool, struct perf_script, tool); 3352 3353 if (script->threads) { 3354 pr_warning("Extra thread map event, ignoring.\n"); 3355 return 0; 3356 } 3357 3358 script->threads = thread_map__new_event(&event->thread_map); 3359 if (!script->threads) 3360 return -ENOMEM; 3361 3362 return set_maps(script); 3363 } 3364 3365 static 3366 int process_cpu_map_event(struct perf_session *session, 3367 union perf_event *event) 3368 { 3369 struct perf_tool *tool = session->tool; 3370 struct perf_script *script = container_of(tool, struct perf_script, tool); 3371 3372 if (script->cpus) { 3373 pr_warning("Extra cpu map event, ignoring.\n"); 3374 return 0; 3375 } 3376 3377 script->cpus = cpu_map__new_data(&event->cpu_map.data); 3378 if (!script->cpus) 3379 return -ENOMEM; 3380 3381 return set_maps(script); 3382 } 3383 3384 static int process_feature_event(struct perf_session *session, 3385 union perf_event *event) 3386 { 3387 if (event->feat.feat_id < HEADER_LAST_FEATURE) 3388 return perf_event__process_feature(session, event); 3389 return 0; 3390 } 3391 3392 #ifdef HAVE_AUXTRACE_SUPPORT 3393 static int perf_script__process_auxtrace_info(struct perf_session *session, 3394 union perf_event *event) 3395 { 3396 struct perf_tool *tool = session->tool; 3397 3398 int ret = perf_event__process_auxtrace_info(session, event); 3399 3400 if (ret == 0) { 3401 struct perf_script *script = container_of(tool, struct perf_script, tool); 3402 3403 ret = perf_script__setup_per_event_dump(script); 3404 } 3405 3406 return ret; 3407 } 3408 #else 3409 #define perf_script__process_auxtrace_info 0 3410 #endif 3411 3412 static int parse_insn_trace(const struct option *opt __maybe_unused, 3413 const char *str __maybe_unused, 3414 int unset __maybe_unused) 3415 { 3416 parse_output_fields(NULL, "+insn,-event,-period", 0); 3417 itrace_parse_synth_opts(opt, "i0ns", 0); 3418 symbol_conf.nanosecs = true; 3419 return 0; 3420 } 3421 3422 static int parse_xed(const struct option *opt __maybe_unused, 3423 const char *str __maybe_unused, 3424 int unset __maybe_unused) 3425 { 3426 if 
(isatty(1)) 3427 force_pager("xed -F insn: -A -64 | less"); 3428 else 3429 force_pager("xed -F insn: -A -64"); 3430 return 0; 3431 } 3432 3433 static int parse_call_trace(const struct option *opt __maybe_unused, 3434 const char *str __maybe_unused, 3435 int unset __maybe_unused) 3436 { 3437 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0); 3438 itrace_parse_synth_opts(opt, "cewp", 0); 3439 symbol_conf.nanosecs = true; 3440 symbol_conf.pad_output_len_dso = 50; 3441 return 0; 3442 } 3443 3444 static int parse_callret_trace(const struct option *opt __maybe_unused, 3445 const char *str __maybe_unused, 3446 int unset __maybe_unused) 3447 { 3448 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0); 3449 itrace_parse_synth_opts(opt, "crewp", 0); 3450 symbol_conf.nanosecs = true; 3451 return 0; 3452 } 3453 3454 int cmd_script(int argc, const char **argv) 3455 { 3456 bool show_full_info = false; 3457 bool header = false; 3458 bool header_only = false; 3459 bool script_started = false; 3460 char *rec_script_path = NULL; 3461 char *rep_script_path = NULL; 3462 struct perf_session *session; 3463 struct itrace_synth_opts itrace_synth_opts = { 3464 .set = false, 3465 .default_no_sample = true, 3466 }; 3467 struct utsname uts; 3468 char *script_path = NULL; 3469 const char **__argv; 3470 int i, j, err = 0; 3471 struct perf_script script = { 3472 .tool = { 3473 .sample = process_sample_event, 3474 .mmap = perf_event__process_mmap, 3475 .mmap2 = perf_event__process_mmap2, 3476 .comm = perf_event__process_comm, 3477 .namespaces = perf_event__process_namespaces, 3478 .cgroup = perf_event__process_cgroup, 3479 .exit = perf_event__process_exit, 3480 .fork = perf_event__process_fork, 3481 .attr = process_attr, 3482 .event_update = perf_event__process_event_update, 3483 .tracing_data = perf_event__process_tracing_data, 3484 .feature = process_feature_event, 3485 .build_id = perf_event__process_build_id, 3486 .id_index = perf_event__process_id_index, 3487 .auxtrace_info = perf_script__process_auxtrace_info, 3488 .auxtrace = perf_event__process_auxtrace, 3489 .auxtrace_error = perf_event__process_auxtrace_error, 3490 .stat = perf_event__process_stat_event, 3491 .stat_round = process_stat_round_event, 3492 .stat_config = process_stat_config_event, 3493 .thread_map = process_thread_map_event, 3494 .cpu_map = process_cpu_map_event, 3495 .ordered_events = true, 3496 .ordering_requires_timestamps = true, 3497 }, 3498 }; 3499 struct perf_data data = { 3500 .mode = PERF_DATA_MODE_READ, 3501 }; 3502 const struct option options[] = { 3503 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 3504 "dump raw trace in ASCII"), 3505 OPT_INCR('v', "verbose", &verbose, 3506 "be more verbose (show symbol address, etc)"), 3507 OPT_BOOLEAN('L', "Latency", &latency_format, 3508 "show latency attributes (irqs/preemption disabled, etc)"), 3509 OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts", 3510 list_available_scripts), 3511 OPT_CALLBACK('s', "script", NULL, "name", 3512 "script file name (lang:script name, script name, or *)", 3513 parse_scriptname), 3514 OPT_STRING('g', "gen-script", &generate_script_lang, "lang", 3515 "generate perf-script.xx script in specified language"), 3516 OPT_STRING('i', "input", &input_name, "file", "input file name"), 3517 OPT_BOOLEAN('d', "debug-mode", &debug_mode, 3518 "do various checks like samples ordering and lost events"), 3519 OPT_BOOLEAN(0, "header", &header, "Show data header."), 3520 OPT_BOOLEAN(0, "header-only", &header_only, "Show only 
data header."), 3521 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 3522 "file", "vmlinux pathname"), 3523 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 3524 "file", "kallsyms pathname"), 3525 OPT_BOOLEAN('G', "hide-call-graph", &no_callchain, 3526 "When printing symbols do not display call chain"), 3527 OPT_CALLBACK(0, "symfs", NULL, "directory", 3528 "Look for files with symbols relative to this directory", 3529 symbol__config_symfs), 3530 OPT_CALLBACK('F', "fields", NULL, "str", 3531 "comma separated output fields prepend with 'type:'. " 3532 "+field to add and -field to remove." 3533 "Valid types: hw,sw,trace,raw,synth. " 3534 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," 3535 "addr,symoff,srcline,period,iregs,uregs,brstack," 3536 "brstacksym,flags,bpf-output,brstackinsn,brstackoff," 3537 "callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc,tod," 3538 "data_page_size", 3539 parse_output_fields), 3540 OPT_BOOLEAN('a', "all-cpus", &system_wide, 3541 "system-wide collection from all CPUs"), 3542 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 3543 "only consider these symbols"), 3544 OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL, 3545 "Decode instructions from itrace", parse_insn_trace), 3546 OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL, 3547 "Run xed disassembler on output", parse_xed), 3548 OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL, 3549 "Decode calls from from itrace", parse_call_trace), 3550 OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL, 3551 "Decode calls and returns from itrace", parse_callret_trace), 3552 OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]", 3553 "Only print symbols and callees with --call-trace/--call-ret-trace"), 3554 OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]", 3555 "Stop display of callgraph at these symbols"), 3556 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), 3557 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 3558 "only display events for these comms"), 3559 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 3560 "only consider symbols in these pids"), 3561 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 3562 "only consider symbols in these tids"), 3563 OPT_UINTEGER(0, "max-stack", &scripting_max_stack, 3564 "Set the maximum stack depth when parsing the callchain, " 3565 "anything beyond the specified depth will be ignored. 
" 3566 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 3567 OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"), 3568 OPT_BOOLEAN(0, "deltatime", &deltatime, "Show time stamps relative to previous event"), 3569 OPT_BOOLEAN('I', "show-info", &show_full_info, 3570 "display extended information from perf.data file"), 3571 OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path, 3572 "Show the path of [kernel.kallsyms]"), 3573 OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events, 3574 "Show the fork/comm/exit events"), 3575 OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events, 3576 "Show the mmap events"), 3577 OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events, 3578 "Show context switch events (if recorded)"), 3579 OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events, 3580 "Show namespace events (if recorded)"), 3581 OPT_BOOLEAN('\0', "show-cgroup-events", &script.show_cgroup_events, 3582 "Show cgroup events (if recorded)"), 3583 OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events, 3584 "Show lost events (if recorded)"), 3585 OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events, 3586 "Show round events (if recorded)"), 3587 OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events, 3588 "Show bpf related events (if recorded)"), 3589 OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events, 3590 "Show text poke related events (if recorded)"), 3591 OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump, 3592 "Dump trace output to files named by the monitored events"), 3593 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 3594 OPT_INTEGER(0, "max-blocks", &max_blocks, 3595 "Maximum number of code blocks to dump with brstackinsn"), 3596 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, 3597 "Use 9 decimal places when displaying time"), 3598 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 3599 "Instruction Tracing options\n" ITRACE_HELP, 3600 itrace_parse_synth_opts), 3601 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename, 3602 "Show full source file name path for source lines"), 3603 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 3604 "Enable symbol demangling"), 3605 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 3606 "Enable kernel symbol demangling"), 3607 OPT_STRING(0, "time", &script.time_str, "str", 3608 "Time span of interest (start,stop)"), 3609 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, 3610 "Show inline function"), 3611 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory", 3612 "guest mount directory under which every guest os" 3613 " instance has a subdir"), 3614 OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name, 3615 "file", "file saving guest os vmlinux"), 3616 OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms, 3617 "file", "file saving guest os /proc/kallsyms"), 3618 OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules, 3619 "file", "file saving guest os /proc/modules"), 3620 OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr, 3621 "Enable LBR callgraph stitching approach"), 3622 OPTS_EVSWITCH(&script.evswitch), 3623 OPT_END() 3624 }; 3625 const char * const script_subcommands[] = { "record", "report", NULL }; 3626 const char *script_usage[] = { 3627 "perf script [<options>]", 3628 "perf script [<options>] record <script> [<record-options>] <command>", 3629 "perf script [<options>] report 
<script> [script-args]", 3630 "perf script [<options>] <script> [<record-options>] <command>", 3631 "perf script [<options>] <top-script> [script-args]", 3632 NULL 3633 }; 3634 3635 perf_set_singlethreaded(); 3636 3637 setup_scripting(); 3638 3639 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage, 3640 PARSE_OPT_STOP_AT_NON_OPTION); 3641 3642 if (symbol_conf.guestmount || 3643 symbol_conf.default_guest_vmlinux_name || 3644 symbol_conf.default_guest_kallsyms || 3645 symbol_conf.default_guest_modules) { 3646 /* 3647 * Enable guest sample processing. 3648 */ 3649 perf_guest = true; 3650 } 3651 3652 data.path = input_name; 3653 data.force = symbol_conf.force; 3654 3655 if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) { 3656 rec_script_path = get_script_path(argv[1], RECORD_SUFFIX); 3657 if (!rec_script_path) 3658 return cmd_record(argc, argv); 3659 } 3660 3661 if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) { 3662 rep_script_path = get_script_path(argv[1], REPORT_SUFFIX); 3663 if (!rep_script_path) { 3664 fprintf(stderr, 3665 "Please specify a valid report script" 3666 "(see 'perf script -l' for listing)\n"); 3667 return -1; 3668 } 3669 } 3670 3671 if (reltime && deltatime) { 3672 fprintf(stderr, 3673 "reltime and deltatime - the two don't get along well. " 3674 "Please limit to --reltime or --deltatime.\n"); 3675 return -1; 3676 } 3677 3678 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) && 3679 itrace_synth_opts.callchain_sz > scripting_max_stack) 3680 scripting_max_stack = itrace_synth_opts.callchain_sz; 3681 3682 /* make sure PERF_EXEC_PATH is set for scripts */ 3683 set_argv_exec_path(get_argv_exec_path()); 3684 3685 if (argc && !script_name && !rec_script_path && !rep_script_path) { 3686 int live_pipe[2]; 3687 int rep_args; 3688 pid_t pid; 3689 3690 rec_script_path = get_script_path(argv[0], RECORD_SUFFIX); 3691 rep_script_path = get_script_path(argv[0], REPORT_SUFFIX); 3692 3693 if (!rec_script_path && !rep_script_path) { 3694 usage_with_options_msg(script_usage, options, 3695 "Couldn't find script `%s'\n\n See perf" 3696 " script -l for available scripts.\n", argv[0]); 3697 } 3698 3699 if (is_top_script(argv[0])) { 3700 rep_args = argc - 1; 3701 } else { 3702 int rec_args; 3703 3704 rep_args = has_required_arg(rep_script_path); 3705 rec_args = (argc - 1) - rep_args; 3706 if (rec_args < 0) { 3707 usage_with_options_msg(script_usage, options, 3708 "`%s' script requires options." 
3709 "\n\n See perf script -l for available " 3710 "scripts and options.\n", argv[0]); 3711 } 3712 } 3713 3714 if (pipe(live_pipe) < 0) { 3715 perror("failed to create pipe"); 3716 return -1; 3717 } 3718 3719 pid = fork(); 3720 if (pid < 0) { 3721 perror("failed to fork"); 3722 return -1; 3723 } 3724 3725 if (!pid) { 3726 j = 0; 3727 3728 dup2(live_pipe[1], 1); 3729 close(live_pipe[0]); 3730 3731 if (is_top_script(argv[0])) { 3732 system_wide = true; 3733 } else if (!system_wide) { 3734 if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) { 3735 err = -1; 3736 goto out; 3737 } 3738 } 3739 3740 __argv = malloc((argc + 6) * sizeof(const char *)); 3741 if (!__argv) { 3742 pr_err("malloc failed\n"); 3743 err = -ENOMEM; 3744 goto out; 3745 } 3746 3747 __argv[j++] = "/bin/sh"; 3748 __argv[j++] = rec_script_path; 3749 if (system_wide) 3750 __argv[j++] = "-a"; 3751 __argv[j++] = "-q"; 3752 __argv[j++] = "-o"; 3753 __argv[j++] = "-"; 3754 for (i = rep_args + 1; i < argc; i++) 3755 __argv[j++] = argv[i]; 3756 __argv[j++] = NULL; 3757 3758 execvp("/bin/sh", (char **)__argv); 3759 free(__argv); 3760 exit(-1); 3761 } 3762 3763 dup2(live_pipe[0], 0); 3764 close(live_pipe[1]); 3765 3766 __argv = malloc((argc + 4) * sizeof(const char *)); 3767 if (!__argv) { 3768 pr_err("malloc failed\n"); 3769 err = -ENOMEM; 3770 goto out; 3771 } 3772 3773 j = 0; 3774 __argv[j++] = "/bin/sh"; 3775 __argv[j++] = rep_script_path; 3776 for (i = 1; i < rep_args + 1; i++) 3777 __argv[j++] = argv[i]; 3778 __argv[j++] = "-i"; 3779 __argv[j++] = "-"; 3780 __argv[j++] = NULL; 3781 3782 execvp("/bin/sh", (char **)__argv); 3783 free(__argv); 3784 exit(-1); 3785 } 3786 3787 if (rec_script_path) 3788 script_path = rec_script_path; 3789 if (rep_script_path) 3790 script_path = rep_script_path; 3791 3792 if (script_path) { 3793 j = 0; 3794 3795 if (!rec_script_path) 3796 system_wide = false; 3797 else if (!system_wide) { 3798 if (have_cmd(argc - 1, &argv[1]) != 0) { 3799 err = -1; 3800 goto out; 3801 } 3802 } 3803 3804 __argv = malloc((argc + 2) * sizeof(const char *)); 3805 if (!__argv) { 3806 pr_err("malloc failed\n"); 3807 err = -ENOMEM; 3808 goto out; 3809 } 3810 3811 __argv[j++] = "/bin/sh"; 3812 __argv[j++] = script_path; 3813 if (system_wide) 3814 __argv[j++] = "-a"; 3815 for (i = 2; i < argc; i++) 3816 __argv[j++] = argv[i]; 3817 __argv[j++] = NULL; 3818 3819 execvp("/bin/sh", (char **)__argv); 3820 free(__argv); 3821 exit(-1); 3822 } 3823 3824 if (!script_name) { 3825 setup_pager(); 3826 use_browser = 0; 3827 } 3828 3829 session = perf_session__new(&data, false, &script.tool); 3830 if (IS_ERR(session)) 3831 return PTR_ERR(session); 3832 3833 if (header || header_only) { 3834 script.tool.show_feat_hdr = SHOW_FEAT_HEADER; 3835 perf_session__fprintf_info(session, stdout, show_full_info); 3836 if (header_only) 3837 goto out_delete; 3838 } 3839 if (show_full_info) 3840 script.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO; 3841 3842 if (symbol__init(&session->header.env) < 0) 3843 goto out_delete; 3844 3845 uname(&uts); 3846 if (data.is_pipe || /* assume pipe_mode indicates native_arch */ 3847 !strcmp(uts.machine, session->header.env.arch) || 3848 (!strcmp(uts.machine, "x86_64") && 3849 !strcmp(session->header.env.arch, "i386"))) 3850 native_arch = true; 3851 3852 script.session = session; 3853 script__setup_sample_type(&script); 3854 3855 if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) || 3856 symbol_conf.graph_function) 3857 itrace_synth_opts.thread_stack = true; 3858 3859 session->itrace_synth_opts = 
&itrace_synth_opts; 3860 3861 if (cpu_list) { 3862 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); 3863 if (err < 0) 3864 goto out_delete; 3865 itrace_synth_opts.cpu_bitmap = cpu_bitmap; 3866 } 3867 3868 if (!no_callchain) 3869 symbol_conf.use_callchain = true; 3870 else 3871 symbol_conf.use_callchain = false; 3872 3873 if (session->tevent.pevent && 3874 tep_set_function_resolver(session->tevent.pevent, 3875 machine__resolve_kernel_addr, 3876 &session->machines.host) < 0) { 3877 pr_err("%s: failed to set libtraceevent function resolver\n", __func__); 3878 err = -1; 3879 goto out_delete; 3880 } 3881 3882 if (generate_script_lang) { 3883 struct stat perf_stat; 3884 int input; 3885 3886 if (output_set_by_user()) { 3887 fprintf(stderr, 3888 "custom fields not supported for generated scripts"); 3889 err = -EINVAL; 3890 goto out_delete; 3891 } 3892 3893 input = open(data.path, O_RDONLY); /* input_name */ 3894 if (input < 0) { 3895 err = -errno; 3896 perror("failed to open file"); 3897 goto out_delete; 3898 } 3899 3900 err = fstat(input, &perf_stat); 3901 if (err < 0) { 3902 perror("failed to stat file"); 3903 goto out_delete; 3904 } 3905 3906 if (!perf_stat.st_size) { 3907 fprintf(stderr, "zero-sized file, nothing to do!\n"); 3908 goto out_delete; 3909 } 3910 3911 scripting_ops = script_spec__lookup(generate_script_lang); 3912 if (!scripting_ops) { 3913 fprintf(stderr, "invalid language specifier"); 3914 err = -ENOENT; 3915 goto out_delete; 3916 } 3917 3918 err = scripting_ops->generate_script(session->tevent.pevent, 3919 "perf-script"); 3920 goto out_delete; 3921 } 3922 3923 if (script_name) { 3924 err = scripting_ops->start_script(script_name, argc, argv); 3925 if (err) 3926 goto out_delete; 3927 pr_debug("perf script started with script %s\n\n", script_name); 3928 script_started = true; 3929 } 3930 3931 3932 err = perf_session__check_output_opt(session); 3933 if (err < 0) 3934 goto out_delete; 3935 3936 if (script.time_str) { 3937 err = perf_time__parse_for_ranges_reltime(script.time_str, session, 3938 &script.ptime_range, 3939 &script.range_size, 3940 &script.range_num, 3941 reltime); 3942 if (err < 0) 3943 goto out_delete; 3944 3945 itrace_synth_opts__set_time_range(&itrace_synth_opts, 3946 script.ptime_range, 3947 script.range_num); 3948 } 3949 3950 err = evswitch__init(&script.evswitch, session->evlist, stderr); 3951 if (err) 3952 goto out_delete; 3953 3954 if (zstd_init(&(session->zstd_data), 0) < 0) 3955 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n"); 3956 3957 err = __cmd_script(&script); 3958 3959 flush_scripting(); 3960 3961 out_delete: 3962 if (script.ptime_range) { 3963 itrace_synth_opts__clear_time_range(&itrace_synth_opts); 3964 zfree(&script.ptime_range); 3965 } 3966 3967 evlist__free_stats(session->evlist); 3968 perf_session__delete(session); 3969 3970 if (script_started) 3971 cleanup_scripting(); 3972 out: 3973 return err; 3974 } 3975
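/*
 * Illustrative invocations of the command implemented above (a sketch of
 * common usage, not an exhaustive reference - see perf-script(1) and the
 * -F/--fields handling earlier in this file):
 *
 *   perf script                          default fields per event type
 *   perf script -F time,comm,ip,sym      override the output fields
 *   perf script --insn-trace --xed       decode an Intel PT trace (needs xed)
 *   perf script -s myscript.py           run a report script (myscript.py
 *                                        being a placeholder script name)
 */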