/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>


struct record {
	struct perf_tool tool;
	struct record_opts opts;
	u64 bytes_written;
	struct perf_data_file file;
	struct perf_evlist *evlist;
	struct perf_session *session;
	const char *progname;
	int realtime_prio;
	bool no_buildid;
	bool no_buildid_cache;
	long samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

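/*
 * Configure and open every event in the evlist on the selected CPUs and
 * threads, falling back to a softer configuration (e.g. excluding kernel
 * samples) when the kernel refuses the first attempt, then apply any
 * tracepoint filters and mmap the ring buffers.
 */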
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
	if (size == 0)
		return 0;

	file->size = size;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	return perf_session__process_events(session, &rec->tool);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the record & report
	 * subcommands, we arrange the module mmap prior to the guest
	 * kernel mmap and trigger a dso preload, because by default
	 * guest module symbols are loaded from guest kallsyms instead
	 * of /lib/modules/XXX/XXX.  This avoids missing symbols when
	 * the first address falls in a module rather than in the guest
	 * kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms may have no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

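/*
 * PERF_RECORD_FINISHED_ROUND is written after each pass over all the mmap
 * buffers; it tells the report side that every event queued before it can
 * be flushed and re-ordered by timestamp.
 */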
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

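	/*
	 * Write a provisional file header now (or a streaming header when
	 * the output goes to a pipe); the on-disk header is rewritten with
	 * the final data size and build-ids once recording has finished.
	 */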
	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

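	/*
	 * Main capture loop: drain every mmap ring buffer, then sleep in
	 * poll() until more data (or a signal) arrives.  Once the workload
	 * is done we keep reading until the buffers stop producing data.
	 */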
	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist, fd, true);
	}

	if (!err && !quiet) {
		char samples[128];

		if (rec->samples)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

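/*
 * Parse the comma-separated branch filter list given to -b/-j into the
 * PERF_SAMPLE_BRANCH_* mask.  If only privilege-level bits were requested,
 * default to sampling any taken branch.
 */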
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER |\
	 PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret;

	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg);
	if (!ret)
		callchain_debug();

	return ret;
}

int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

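/*
 * Map the record-specific "record.call-graph" perfconfig key onto the
 * generic "call-graph.record-mode" name before handing it to the default
 * config handler.
 */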
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time = true,
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq = 4000,
		.target = {
			.uses_mmap = true,
			.default_per_cpu = true,
		},
	},
	.tool = {
		.sample = process_sample_event,
		.fork = perf_event__process_fork,
		.comm = perf_event__process_comm,
		.mmap = perf_event__process_mmap,
		.mmap2 = perf_event__process_mmap2,
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
		    "Sample machine registers on interrupt"),
	OPT_END()
};

struct option *record_options = __record_options;

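/*
 * Entry point for 'perf record': parse the command line, validate the
 * target (pid/tid/cpu/uid), build the CPU and thread maps and then hand
 * control to __cmd_record() for the actual session.
 */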
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}