// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>

struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	bool is_pipe;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
		struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
		union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
		union perf_event *event,
		struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
		union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
		union perf_event *event,
		u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
		union perf_event *event,
		struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
		union perf_event *event,
		struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

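/*
 * AUX area data (e.g. Intel PT) immediately follows its PERF_RECORD_AUXTRACE
 * event in the stream, so repiping has to forward the payload along with the
 * event itself.  When the output is a regular file, an auxtrace index entry
 * is also recorded so the data can be located again later.
 */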
#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
		union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
				event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
		union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
		union perf_event *event,
		struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine);

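/*
 * Default sample handler: hand the sample to a per-evsel handler if one has
 * been installed (e.g. for sched-stat merging), otherwise mark the hit DSO
 * for build-id purposes, trim the aux_sample payload when itrace synthesis
 * is in use, and repipe the (possibly trimmed) event.
 */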
static int perf_event__repipe_sample(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

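/*
 * Look up (or create) the DSO for a map event, resolving it in the mount
 * namespace of the owning thread.  The vdso is special-cased because it
 * always belongs to the host, so the copied nsinfo has need_setns cleared
 * before it is attached to the DSO.
 */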
static struct dso *findnew_dso(int pid, int tid, const char *filename,
		struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

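/*
 * The following handlers keep the session's thread and machine state up to
 * date by calling the normal perf_event__process_*() helpers while also
 * repiping the event, so the output stream stays complete.
 */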
static int perf_event__repipe_fork(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
		union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

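/*
 * Read the build-id of a DSO, entering its mount namespace if necessary,
 * and synthesize a build-id event into the output stream.  Anonymous,
 * hugetlb and other non-file mappings are skipped.
 */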
static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
		struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
			perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel __maybe_unused,
		struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
				event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

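/*
 * -s/--sched-stat support: each sched_switch sample is stashed per tid and
 * later merged with the matching sched_stat_* sample, so the repiped sample
 * shows where a task slept (context of the stashed sched_switch sample) and
 * for how long (period and time of the sched_stat sample).  Stashed entries
 * are dropped again when the task exits.
 */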
static int perf_inject__sched_process_exit(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct evsel *evsel __maybe_unused,
		struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
				"Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

static int perf_inject__sched_stat(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
			evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
				name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample __maybe_unused,
		struct evsel *evsel __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

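/*
 * Wire up the tool callbacks according to the requested operation (build-id
 * injection, sched-stat merging, itrace synthesis or VM time correlation),
 * process the session, and finally rewrite the header when writing to a
 * regular file.
 */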
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data *data_out = &inject->output;
	int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = 4096;
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!inject->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
					HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

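/*
 * By default every event is passed through unchanged; individual callbacks
 * are overridden later depending on the selected options.
 */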
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.aux_output_hw_id = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			"Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			"Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			"input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			"output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			"Merge sched-stat and sched-switch for getting events "
			"where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			"be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			"file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			"don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			"kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
			NULL, "opts", "Instruction Tracing options\n"
			ITRACE_HELP,
			itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			"strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
			"correlate time between VM guests and the host",
			parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
				"the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe the header when the input is a regular file,
		 * since it can either rewrite the header at the end or write
		 * a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
			perf_data__fd(&inject.output),
			&inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
				inject.session,
				&inject.output,
				perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Make sure the mmap records are ordered correctly,
		 * especially with respect to jitted code mmaps.  We cannot
		 * generate the buildid hit list and inject the jit mmaps at
		 * the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

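	/*
	 * In JIT mode the mmap/mmap2 handlers look for jitdump marker mmaps
	 * and inject the recorded jitted code as synthesized mmap events
	 * plus generated ELF images (see jit_process()).
	 */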
#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}