// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>

struct perf_inject {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			build_ids;
	bool			build_id_all;
	bool			sched_stat;
	bool			have_auxtrace;
	bool			strip;
	bool			jit_mode;
	bool			in_place_update;
	bool			in_place_update_dry_run;
	bool			is_pipe;
	const char		*input_name;
	struct perf_data	output;
	u64			bytes_written;
	u64			aux_id;
	struct list_head	samples;
	struct itrace_synth_opts itrace_synth_opts;
	char			event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32		 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);
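
/*
 * Default sample handler: defer to a per-evsel handler if one was installed
 * (e.g. for sched_stat merging), otherwise mark the sample's DSO as hit for
 * build-id processing, cut the AUX area payload out of the sample when
 * itrace synthesis is in use, and repipe the (possibly trimmed) event.
 */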
static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif
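
/*
 * Look up (or create) the thread and the DSO for a map event.  vdso maps are
 * resolved against the host rather than the thread's mount namespace, so the
 * copied nsinfo has need_setns cleared; the namespace info is then attached
 * to the returned DSO.
 */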
static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}
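
/*
 * The handlers below both feed the event to perf's default processing (to
 * keep the internal thread/comm/namespace state up to date) and repipe the
 * original event unchanged.
 */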
static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	perf_event__repipe_synth(session->tool, event);

	return perf_event__process_tracing_data(session, event);
}

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}
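
/*
 * -s/--sched-stat support: sched_switch samples are stashed per tid on
 * inject->samples (and dropped again on sched_process_exit).  When a
 * sched_stat_* sample arrives for a stashed tid, the saved switch event is
 * re-synthesized with the stat sample's period and timestamp and repiped,
 * showing where and for how long the task slept.
 */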
static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}
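
/*
 * --vm-time-correlation[=opts]: switch to in-place update mode and hand the
 * option string to the AUX area decoder.  A leading "dry-run" keeps the
 * input file from actually being modified.
 */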
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}
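
/*
 * Wire up the tool callbacks according to the requested mode (--buildid-all,
 * -b, -s, --vm-time-correlation or --itrace), process all events and, unless
 * the output is a pipe or the update is in place, rewrite the perf.data
 * header to match the injected stream.
 */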
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!inject->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample		= perf_event__repipe_sample,
			.read		= perf_event__repipe_sample,
			.mmap		= perf_event__repipe,
			.mmap2		= perf_event__repipe,
			.comm		= perf_event__repipe,
			.namespaces	= perf_event__repipe,
			.cgroup		= perf_event__repipe,
			.fork		= perf_event__repipe,
			.exit		= perf_event__repipe,
			.lost		= perf_event__repipe,
			.lost_samples	= perf_event__repipe,
			.aux		= perf_event__repipe,
			.itrace_start	= perf_event__repipe,
			.aux_output_hw_id = perf_event__repipe,
			.context_switch	= perf_event__repipe,
			.throttle	= perf_event__repipe,
			.unthrottle	= perf_event__repipe,
			.ksymbol	= perf_event__repipe,
			.bpf		= perf_event__repipe,
			.text_poke	= perf_event__repipe,
			.attr		= perf_event__repipe_attr,
			.event_update	= perf_event__repipe_event_update,
			.tracing_data	= perf_event__repipe_op2_synth,
			.finished_round	= perf_event__repipe_oe_synth,
			.build_id	= perf_event__repipe_op2_synth,
			.id_index	= perf_event__repipe_op2_synth,
			.auxtrace_info	= perf_event__repipe_op2_synth,
			.auxtrace_error	= perf_event__repipe_op2_synth,
			.time_conv	= perf_event__repipe_op2_synth,
			.thread_map	= perf_event__repipe_op2_synth,
			.cpu_map	= perf_event__repipe_op2_synth,
			.stat_config	= perf_event__repipe_op2_synth,
			.stat		= perf_event__repipe_op2_synth,
			.stat_round	= perf_event__repipe_op2_synth,
			.feature	= perf_event__repipe_op2_synth,
			.compressed	= perf_event__repipe_op4_synth,
			.auxtrace	= perf_event__repipe_auxtrace,
		},
		.input_name  = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			    "don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe header when input is a regular file
		 * since either it can rewrite the header at the end
		 * or write a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     output_fd(&inject),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Use ordered events to make sure the mmap records are
		 * ordered correctly, especially in the presence of jitted
		 * code mmaps.  We cannot generate the buildid hit list and
		 * inject the jit mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}