// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>

struct perf_inject {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			build_ids;
	bool			build_id_all;
	bool			sched_stat;
	bool			have_auxtrace;
	bool			strip;
	bool			jit_mode;
	bool			in_place_update;
	bool			in_place_update_dry_run;
	bool			is_pipe;
	const char		*input_name;
	struct perf_data	output;
	u64			bytes_written;
	u64			aux_id;
	struct list_head	samples;
	struct itrace_synth_opts itrace_synth_opts;
	char			event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32		 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}
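
/*
 * Strip the inline AUX area data out of a PERF_SAMPLE_AUX sample: copy the
 * event up to the aux data, append whatever follows it, shrink the header
 * size accordingly and zero the recorded aux_sample size (the u64 that
 * precedes the data).  If the computed sizes do not add up, the event is
 * returned unmodified.
 */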
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif
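
/*
 * Find or create the thread and the DSO for an mmap, and attach the thread's
 * namespace info to the DSO.  vdso maps always live on the host rather than
 * in a container, so for them a copy of the nsinfo is used with need_setns
 * cleared, to keep later build-id reads from entering the namespace.
 */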
static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container.  Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}
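
/*
 * For --sched-stat, sched_switch samples are stashed per tid on
 * inject->samples.  When a matching sched_stat_* sample arrives, the stashed
 * switch event is re-synthesized with the stat sample's period and timestamp
 * and repiped in its place; a sched_process_exit sample drops the stashed
 * entry for that tid.
 */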
static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}
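
/*
 * --vm-time-correlation accepts an optional argument: a leading "dry-run"
 * selects dry-run mode (the file is not updated in place), and anything that
 * follows is passed through as correlation arguments for the AUX area
 * decoder.
 */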
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}

static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!inject->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample		= perf_event__repipe_sample,
			.read		= perf_event__repipe_sample,
			.mmap		= perf_event__repipe,
			.mmap2		= perf_event__repipe,
			.comm		= perf_event__repipe,
			.namespaces	= perf_event__repipe,
			.cgroup		= perf_event__repipe,
			.fork		= perf_event__repipe,
			.exit		= perf_event__repipe,
			.lost		= perf_event__repipe,
			.lost_samples	= perf_event__repipe,
			.aux		= perf_event__repipe,
			.itrace_start	= perf_event__repipe,
			.aux_output_hw_id = perf_event__repipe,
			.context_switch	= perf_event__repipe,
			.throttle	= perf_event__repipe,
			.unthrottle	= perf_event__repipe,
			.ksymbol	= perf_event__repipe,
			.bpf		= perf_event__repipe,
			.text_poke	= perf_event__repipe,
			.attr		= perf_event__repipe_attr,
			.event_update	= perf_event__repipe_event_update,
			.tracing_data	= perf_event__repipe_op2_synth,
			.finished_round	= perf_event__repipe_oe_synth,
			.build_id	= perf_event__repipe_op2_synth,
			.id_index	= perf_event__repipe_op2_synth,
			.auxtrace_info	= perf_event__repipe_op2_synth,
			.auxtrace_error	= perf_event__repipe_op2_synth,
			.time_conv	= perf_event__repipe_op2_synth,
			.thread_map	= perf_event__repipe_op2_synth,
			.cpu_map	= perf_event__repipe_op2_synth,
			.stat_config	= perf_event__repipe_op2_synth,
			.stat		= perf_event__repipe_op2_synth,
			.stat_round	= perf_event__repipe_op2_synth,
			.feature	= perf_event__repipe_op2_synth,
			.compressed	= perf_event__repipe_op4_synth,
			.auxtrace	= perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode,
			    "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			    "don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe header when input is a regular file
		 * since either it can rewrite the header at the end
		 * or write a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     output_fd(&inject),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Order events to make sure the mmap records are handled
		 * correctly, especially in the presence of jitted code mmaps.
		 * We cannot generate the buildid hit list and inject the jit
		 * mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}