1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-inject.c
4  *
5  * Builtin inject command: Examine the live mode (stdin) event stream
6  * and repipe it to stdout while optionally injecting additional
7  * events into it.
8  */
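
/*
 * Example pipe-mode use (a sketch; see the perf-inject man page for details):
 *
 *   perf record -o - -- <workload> | perf inject -b | perf report -i -
 *
 * where -b injects build-id events for DSOs hit by samples, so the data can
 * be analysed later or on another machine.
 */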
9 #include "builtin.h"
10 
11 #include "util/color.h"
12 #include "util/dso.h"
13 #include "util/vdso.h"
14 #include "util/evlist.h"
15 #include "util/evsel.h"
16 #include "util/map.h"
17 #include "util/session.h"
18 #include "util/tool.h"
19 #include "util/debug.h"
20 #include "util/build-id.h"
21 #include "util/data.h"
22 #include "util/auxtrace.h"
23 #include "util/jit.h"
24 #include "util/string2.h"
25 #include "util/symbol.h"
26 #include "util/synthetic-events.h"
27 #include "util/thread.h"
28 #include "util/namespaces.h"
29 #include "util/util.h"
30 #include "util/tsc.h"
31 
32 #include <internal/lib.h>
33 
34 #include <linux/err.h>
35 #include <subcmd/parse-options.h>
36 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
37 
38 #include <linux/list.h>
39 #include <linux/string.h>
40 #include <linux/zalloc.h>
41 #include <linux/hash.h>
42 #include <ctype.h>
43 #include <errno.h>
44 #include <signal.h>
45 #include <inttypes.h>
46 
47 struct guest_event {
48 	struct perf_sample		sample;
49 	union perf_event		*event;
50 	char				event_buf[PERF_SAMPLE_MAX_SIZE];
51 };
52 
53 struct guest_id {
54 	/* hlist_node must be first, see free_hlist() */
55 	struct hlist_node		node;
56 	u64				id;
57 	u64				host_id;
58 	u32				vcpu;
59 };
60 
61 struct guest_tid {
62 	/* hlist_node must be first, see free_hlist() */
63 	struct hlist_node		node;
64 	/* Thread ID of QEMU thread */
65 	u32				tid;
66 	u32				vcpu;
67 };
68 
69 struct guest_vcpu {
70 	/* Current host CPU */
71 	u32				cpu;
72 	/* Thread ID of QEMU thread */
73 	u32				tid;
74 };
75 
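/*
 * State for --guest-data: the guest perf.data file is processed into a
 * temporary file, and its events are then interleaved into the host event
 * stream in timestamp order, with guest sample IDs remapped to newly
 * allocated host IDs, guest VCPUs mapped to host CPUs, and guest timestamps
 * converted to host time (see guest_session__inject_events()).
 */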
76 struct guest_session {
77 	char				*perf_data_file;
78 	u32				machine_pid;
79 	u64				time_offset;
80 	double				time_scale;
81 	struct perf_tool		tool;
82 	struct perf_data		data;
83 	struct perf_session		*session;
84 	char				*tmp_file_name;
85 	int				tmp_fd;
86 	struct perf_tsc_conversion	host_tc;
87 	struct perf_tsc_conversion	guest_tc;
88 	bool				copy_kcore_dir;
89 	bool				have_tc;
90 	bool				fetched;
91 	bool				ready;
92 	u16				dflt_id_hdr_size;
93 	u64				dflt_id;
94 	u64				highest_id;
95 	/* Array of guest_vcpu */
96 	struct guest_vcpu		*vcpu;
97 	size_t				vcpu_cnt;
98 	/* Hash table for guest_id */
99 	struct hlist_head		heads[PERF_EVLIST__HLIST_SIZE];
100 	/* Hash table for guest_tid */
101 	struct hlist_head		tids[PERF_EVLIST__HLIST_SIZE];
102 	/* Place to stash next guest event */
103 	struct guest_event		ev;
104 };
105 
106 struct perf_inject {
107 	struct perf_tool	tool;
108 	struct perf_session	*session;
109 	bool			build_ids;
110 	bool			build_id_all;
111 	bool			sched_stat;
112 	bool			have_auxtrace;
113 	bool			strip;
114 	bool			jit_mode;
115 	bool			in_place_update;
116 	bool			in_place_update_dry_run;
117 	bool			is_pipe;
118 	bool			copy_kcore_dir;
119 	const char		*input_name;
120 	struct perf_data	output;
121 	u64			bytes_written;
122 	u64			aux_id;
123 	struct list_head	samples;
124 	struct itrace_synth_opts itrace_synth_opts;
125 	char			event_copy[PERF_SAMPLE_MAX_SIZE];
126 	struct perf_file_section secs[HEADER_FEAT_BITS];
127 	struct guest_session	guest_session;
128 	struct strlist		*known_build_ids;
129 };
130 
131 struct event_entry {
132 	struct list_head node;
133 	u32		 tid;
134 	union perf_event event[];
135 };
136 
137 static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
138 				struct machine *machine, u8 cpumode, u32 flags);
139 
140 static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
141 {
142 	ssize_t size;
143 
144 	size = perf_data__write(&inject->output, buf, sz);
145 	if (size < 0)
146 		return -errno;
147 
148 	inject->bytes_written += size;
149 	return 0;
150 }
151 
152 static int perf_event__repipe_synth(struct perf_tool *tool,
153 				    union perf_event *event)
154 {
155 	struct perf_inject *inject = container_of(tool, struct perf_inject,
156 						  tool);
157 
158 	return output_bytes(inject, event, event->header.size);
159 }
160 
161 static int perf_event__repipe_oe_synth(struct perf_tool *tool,
162 				       union perf_event *event,
163 				       struct ordered_events *oe __maybe_unused)
164 {
165 	return perf_event__repipe_synth(tool, event);
166 }
167 
168 #ifdef HAVE_JITDUMP
169 static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
170 			       union perf_event *event __maybe_unused,
171 			       struct ordered_events *oe __maybe_unused)
172 {
173 	return 0;
174 }
175 #endif
176 
177 static int perf_event__repipe_op2_synth(struct perf_session *session,
178 					union perf_event *event)
179 {
180 	return perf_event__repipe_synth(session->tool, event);
181 }
182 
183 static int perf_event__repipe_op4_synth(struct perf_session *session,
184 					union perf_event *event,
185 					u64 data __maybe_unused,
186 					const char *str __maybe_unused)
187 {
188 	return perf_event__repipe_synth(session->tool, event);
189 }
190 
191 static int perf_event__repipe_attr(struct perf_tool *tool,
192 				   union perf_event *event,
193 				   struct evlist **pevlist)
194 {
195 	struct perf_inject *inject = container_of(tool, struct perf_inject,
196 						  tool);
197 	int ret;
198 
199 	ret = perf_event__process_attr(tool, event, pevlist);
200 	if (ret)
201 		return ret;
202 
203 	if (!inject->is_pipe)
204 		return 0;
205 
206 	return perf_event__repipe_synth(tool, event);
207 }
208 
209 static int perf_event__repipe_event_update(struct perf_tool *tool,
210 					   union perf_event *event,
211 					   struct evlist **pevlist __maybe_unused)
212 {
213 	return perf_event__repipe_synth(tool, event);
214 }
215 
216 #ifdef HAVE_AUXTRACE_SUPPORT
217 
218 static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
219 {
220 	char buf[4096];
221 	ssize_t ssz;
222 	int ret;
223 
224 	while (size > 0) {
225 		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
226 		if (ssz < 0)
227 			return -errno;
228 		ret = output_bytes(inject, buf, ssz);
229 		if (ret)
230 			return ret;
231 		size -= ssz;
232 	}
233 
234 	return 0;
235 }
236 
237 static s64 perf_event__repipe_auxtrace(struct perf_session *session,
238 				       union perf_event *event)
239 {
240 	struct perf_tool *tool = session->tool;
241 	struct perf_inject *inject = container_of(tool, struct perf_inject,
242 						  tool);
243 	int ret;
244 
245 	inject->have_auxtrace = true;
246 
247 	if (!inject->output.is_pipe) {
248 		off_t offset;
249 
250 		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
251 		if (offset == -1)
252 			return -errno;
253 		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
254 						     event, offset);
255 		if (ret < 0)
256 			return ret;
257 	}
258 
259 	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
260 		ret = output_bytes(inject, event, event->header.size);
261 		if (ret < 0)
262 			return ret;
263 		ret = copy_bytes(inject, perf_data__fd(session->data),
264 				 event->auxtrace.size);
265 	} else {
266 		ret = output_bytes(inject, event,
267 				   event->header.size + event->auxtrace.size);
268 	}
269 	if (ret < 0)
270 		return ret;
271 
272 	return event->auxtrace.size;
273 }
274 
275 #else
276 
277 static s64
278 perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
279 			    union perf_event *event __maybe_unused)
280 {
281 	pr_err("AUX area tracing not supported\n");
282 	return -EINVAL;
283 }
284 
285 #endif
286 
287 static int perf_event__repipe(struct perf_tool *tool,
288 			      union perf_event *event,
289 			      struct perf_sample *sample __maybe_unused,
290 			      struct machine *machine __maybe_unused)
291 {
292 	return perf_event__repipe_synth(tool, event);
293 }
294 
295 static int perf_event__drop(struct perf_tool *tool __maybe_unused,
296 			    union perf_event *event __maybe_unused,
297 			    struct perf_sample *sample __maybe_unused,
298 			    struct machine *machine __maybe_unused)
299 {
300 	return 0;
301 }
302 
303 static int perf_event__drop_aux(struct perf_tool *tool,
304 				union perf_event *event __maybe_unused,
305 				struct perf_sample *sample,
306 				struct machine *machine __maybe_unused)
307 {
308 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
309 
310 	if (!inject->aux_id)
311 		inject->aux_id = sample->id;
312 
313 	return 0;
314 }
315 
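/*
 * Remove the AUX area data embedded in a sample (PERF_SAMPLE_AUX), keeping
 * the rest of the sample and setting the embedded size to zero. Used when
 * instruction trace data is being decoded into synthesized events, so the
 * raw data does not need to be carried along.
 */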
316 static union perf_event *
317 perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
318 				 union perf_event *event,
319 				 struct perf_sample *sample)
320 {
321 	size_t sz1 = sample->aux_sample.data - (void *)event;
322 	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
323 	union perf_event *ev = (union perf_event *)inject->event_copy;
324 
325 	if (sz1 > event->header.size || sz2 > event->header.size ||
326 	    sz1 + sz2 > event->header.size ||
327 	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
328 		return event;
329 
330 	memcpy(ev, event, sz1);
331 	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
332 	ev->header.size = sz1 + sz2;
333 	((u64 *)((void *)ev + sz1))[-1] = 0;
334 
335 	return ev;
336 }
337 
338 typedef int (*inject_handler)(struct perf_tool *tool,
339 			      union perf_event *event,
340 			      struct perf_sample *sample,
341 			      struct evsel *evsel,
342 			      struct machine *machine);
343 
344 static int perf_event__repipe_sample(struct perf_tool *tool,
345 				     union perf_event *event,
346 				     struct perf_sample *sample,
347 				     struct evsel *evsel,
348 				     struct machine *machine)
349 {
350 	struct perf_inject *inject = container_of(tool, struct perf_inject,
351 						  tool);
352 
353 	if (evsel && evsel->handler) {
354 		inject_handler f = evsel->handler;
355 		return f(tool, event, sample, evsel, machine);
356 	}
357 
358 	build_id__mark_dso_hit(tool, event, sample, evsel, machine);
359 
360 	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
361 		event = perf_inject__cut_auxtrace_sample(inject, event, sample);
362 
363 	return perf_event__repipe_synth(tool, event);
364 }
365 
366 static int perf_event__repipe_mmap(struct perf_tool *tool,
367 				   union perf_event *event,
368 				   struct perf_sample *sample,
369 				   struct machine *machine)
370 {
371 	int err;
372 
373 	err = perf_event__process_mmap(tool, event, sample, machine);
374 	perf_event__repipe(tool, event, sample, machine);
375 
376 	return err;
377 }
378 
379 #ifdef HAVE_JITDUMP
380 static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
381 				       union perf_event *event,
382 				       struct perf_sample *sample,
383 				       struct machine *machine)
384 {
385 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
386 	u64 n = 0;
387 	int ret;
388 
389 	/*
390 	 * if jit marker, then inject jit mmaps and generate ELF images
391 	 */
392 	ret = jit_process(inject->session, &inject->output, machine,
393 			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
394 	if (ret < 0)
395 		return ret;
396 	if (ret) {
397 		inject->bytes_written += n;
398 		return 0;
399 	}
400 	return perf_event__repipe_mmap(tool, event, sample, machine);
401 }
402 #endif
403 
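/*
 * Find or create the thread and dso for an mmap event, attaching the task's
 * namespace info to the dso so that its build ID can later be read from the
 * correct mount namespace. The vdso is an exception: it always lives on the
 * host (see below).
 */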
404 static struct dso *findnew_dso(int pid, int tid, const char *filename,
405 			       struct dso_id *id, struct machine *machine)
406 {
407 	struct thread *thread;
408 	struct nsinfo *nsi = NULL;
409 	struct nsinfo *nnsi;
410 	struct dso *dso;
411 	bool vdso;
412 
413 	thread = machine__findnew_thread(machine, pid, tid);
414 	if (thread == NULL) {
415 		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
416 		return NULL;
417 	}
418 
419 	vdso = is_vdso_map(filename);
420 	nsi = nsinfo__get(thread->nsinfo);
421 
422 	if (vdso) {
423 		/* The vdso maps are always on the host and not the
424 		 * container.  Ensure that we don't use setns to look
425 		 * them up.
426 		 */
427 		nnsi = nsinfo__copy(nsi);
428 		if (nnsi) {
429 			nsinfo__put(nsi);
430 			nsinfo__clear_need_setns(nnsi);
431 			nsi = nnsi;
432 		}
433 		dso = machine__findnew_vdso(machine, thread);
434 	} else {
435 		dso = machine__findnew_dso_id(machine, filename, id);
436 	}
437 
438 	if (dso) {
439 		nsinfo__put(dso->nsinfo);
440 		dso->nsinfo = nsi;
441 	} else
442 		nsinfo__put(nsi);
443 
444 	thread__put(thread);
445 	return dso;
446 }
447 
448 static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
449 					   union perf_event *event,
450 					   struct perf_sample *sample,
451 					   struct machine *machine)
452 {
453 	struct dso *dso;
454 
455 	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
456 			  event->mmap.filename, NULL, machine);
457 
458 	if (dso && !dso->hit) {
459 		dso->hit = 1;
460 		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
461 	}
462 	dso__put(dso);
463 
464 	return perf_event__repipe(tool, event, sample, machine);
465 }
466 
467 static int perf_event__repipe_mmap2(struct perf_tool *tool,
468 				   union perf_event *event,
469 				   struct perf_sample *sample,
470 				   struct machine *machine)
471 {
472 	int err;
473 
474 	err = perf_event__process_mmap2(tool, event, sample, machine);
475 	perf_event__repipe(tool, event, sample, machine);
476 
477 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
478 		struct dso *dso;
479 
480 		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
481 				  event->mmap2.filename, NULL, machine);
482 		if (dso) {
483 			/* mark it so a build-id is not injected for it later */
484 			dso->hit = 1;
485 		}
486 		dso__put(dso);
487 	}
488 
489 	return err;
490 }
491 
492 #ifdef HAVE_JITDUMP
493 static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
494 					union perf_event *event,
495 					struct perf_sample *sample,
496 					struct machine *machine)
497 {
498 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
499 	u64 n = 0;
500 	int ret;
501 
502 	/*
503 	 * if jit marker, then inject jit mmaps and generate ELF images
504 	 */
505 	ret = jit_process(inject->session, &inject->output, machine,
506 			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
507 	if (ret < 0)
508 		return ret;
509 	if (ret) {
510 		inject->bytes_written += n;
511 		return 0;
512 	}
513 	return perf_event__repipe_mmap2(tool, event, sample, machine);
514 }
515 #endif
516 
517 static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
518 					    union perf_event *event,
519 					    struct perf_sample *sample,
520 					    struct machine *machine)
521 {
522 	struct dso_id dso_id = {
523 		.maj = event->mmap2.maj,
524 		.min = event->mmap2.min,
525 		.ino = event->mmap2.ino,
526 		.ino_generation = event->mmap2.ino_generation,
527 	};
528 	struct dso *dso;
529 
530 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
531 		/* cannot use dso_id since it'd have invalid info */
532 		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
533 				  event->mmap2.filename, NULL, machine);
534 		if (dso) {
535 			/* mark it so a build-id is not injected for it later */
536 			dso->hit = 1;
537 		}
538 		dso__put(dso);
539 		return 0;
540 	}
541 
542 	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
543 			  event->mmap2.filename, &dso_id, machine);
544 
545 	if (dso && !dso->hit) {
546 		dso->hit = 1;
547 		dso__inject_build_id(dso, tool, machine, sample->cpumode,
548 				     event->mmap2.flags);
549 	}
550 	dso__put(dso);
551 
552 	perf_event__repipe(tool, event, sample, machine);
553 
554 	return 0;
555 }
556 
557 static int perf_event__repipe_fork(struct perf_tool *tool,
558 				   union perf_event *event,
559 				   struct perf_sample *sample,
560 				   struct machine *machine)
561 {
562 	int err;
563 
564 	err = perf_event__process_fork(tool, event, sample, machine);
565 	perf_event__repipe(tool, event, sample, machine);
566 
567 	return err;
568 }
569 
570 static int perf_event__repipe_comm(struct perf_tool *tool,
571 				   union perf_event *event,
572 				   struct perf_sample *sample,
573 				   struct machine *machine)
574 {
575 	int err;
576 
577 	err = perf_event__process_comm(tool, event, sample, machine);
578 	perf_event__repipe(tool, event, sample, machine);
579 
580 	return err;
581 }
582 
583 static int perf_event__repipe_namespaces(struct perf_tool *tool,
584 					 union perf_event *event,
585 					 struct perf_sample *sample,
586 					 struct machine *machine)
587 {
588 	int err = perf_event__process_namespaces(tool, event, sample, machine);
589 
590 	perf_event__repipe(tool, event, sample, machine);
591 
592 	return err;
593 }
594 
595 static int perf_event__repipe_exit(struct perf_tool *tool,
596 				   union perf_event *event,
597 				   struct perf_sample *sample,
598 				   struct machine *machine)
599 {
600 	int err;
601 
602 	err = perf_event__process_exit(tool, event, sample, machine);
603 	perf_event__repipe(tool, event, sample, machine);
604 
605 	return err;
606 }
607 
608 static int perf_event__repipe_tracing_data(struct perf_session *session,
609 					   union perf_event *event)
610 {
611 	perf_event__repipe_synth(session->tool, event);
612 
613 	return perf_event__process_tracing_data(session, event);
614 }
615 
616 static int dso__read_build_id(struct dso *dso)
617 {
618 	struct nscookie nsc;
619 
620 	if (dso->has_build_id)
621 		return 0;
622 
623 	nsinfo__mountns_enter(dso->nsinfo, &nsc);
624 	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
625 		dso->has_build_id = true;
626 	else if (dso->nsinfo) {
627 		char *new_name;
628 
629 		new_name = filename_with_chroot(dso->nsinfo->pid,
630 						dso->long_name);
631 		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
632 			dso->has_build_id = true;
633 		free(new_name);
634 	}
635 	nsinfo__mountns_exit(&nsc);
636 
637 	return dso->has_build_id ? 0 : -1;
638 }
639 
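/*
 * Parse the --known-build-ids argument: comma-separated "<build-id> <path>"
 * pairs, e.g. (hypothetical values)
 *
 *   --known-build-ids="<build-id> /usr/lib/libc.so.6,<build-id> /usr/bin/foo"
 *
 * Each build-id must be an even number of hex digits; malformed entries are
 * dropped from the list.
 */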
640 static struct strlist *perf_inject__parse_known_build_ids(
641 	const char *known_build_ids_string)
642 {
643 	struct str_node *pos, *tmp;
644 	struct strlist *known_build_ids;
645 	int bid_len;
646 
647 	known_build_ids = strlist__new(known_build_ids_string, NULL);
648 	if (known_build_ids == NULL)
649 		return NULL;
650 	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
651 		const char *build_id, *dso_name;
652 
653 		build_id = skip_spaces(pos->s);
654 		dso_name = strchr(build_id, ' ');
655 		if (dso_name == NULL) {
656 			strlist__remove(known_build_ids, pos);
657 			continue;
658 		}
659 		bid_len = dso_name - pos->s;
660 		dso_name = skip_spaces(dso_name);
661 		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
662 			strlist__remove(known_build_ids, pos);
663 			continue;
664 		}
665 		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
666 			if (!isxdigit(build_id[2 * ix]) ||
667 			    !isxdigit(build_id[2 * ix + 1])) {
668 				strlist__remove(known_build_ids, pos);
669 				break;
670 			}
671 		}
672 	}
673 	return known_build_ids;
674 }
675 
676 static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
677 					       struct dso *dso)
678 {
679 	struct str_node *pos;
680 	int bid_len;
681 
682 	strlist__for_each_entry(pos, inject->known_build_ids) {
683 		const char *build_id, *dso_name;
684 
685 		build_id = skip_spaces(pos->s);
686 		dso_name = strchr(build_id, ' ');
687 		bid_len = dso_name - pos->s;
688 		dso_name = skip_spaces(dso_name);
689 		if (strcmp(dso->long_name, dso_name))
690 			continue;
691 		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
692 			dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
693 					     hex(build_id[2 * ix + 1]));
694 		}
695 		dso->bid.size = bid_len / 2;
696 		dso->has_build_id = 1;
697 		return true;
698 	}
699 	return false;
700 }
701 
702 static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
703 				struct machine *machine, u8 cpumode, u32 flags)
704 {
705 	struct perf_inject *inject = container_of(tool, struct perf_inject,
706 						  tool);
707 	int err;
708 
709 	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
710 		return 0;
711 	if (is_no_dso_memory(dso->long_name))
712 		return 0;
713 
714 	if (inject->known_build_ids != NULL &&
715 	    perf_inject__lookup_known_build_id(inject, dso))
716 		return 1;
717 
718 	if (dso__read_build_id(dso) < 0) {
719 		pr_debug("no build_id found for %s\n", dso->long_name);
720 		return -1;
721 	}
722 
723 	err = perf_event__synthesize_build_id(tool, dso, cpumode,
724 					      perf_event__repipe, machine);
725 	if (err) {
726 		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
727 		return -1;
728 	}
729 
730 	return 0;
731 }
732 
733 int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
734 			       struct perf_sample *sample,
735 			       struct evsel *evsel __maybe_unused,
736 			       struct machine *machine)
737 {
738 	struct addr_location al;
739 	struct thread *thread;
740 
741 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
742 	if (thread == NULL) {
743 		pr_err("problem processing %d event, skipping it.\n",
744 		       event->header.type);
745 		goto repipe;
746 	}
747 
748 	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
749 		if (!al.map->dso->hit) {
750 			al.map->dso->hit = 1;
751 			dso__inject_build_id(al.map->dso, tool, machine,
752 					     sample->cpumode, al.map->flags);
753 		}
754 	}
755 
756 	thread__put(thread);
757 repipe:
758 	perf_event__repipe(tool, event, sample, machine);
759 	return 0;
760 }
761 
762 static int perf_inject__sched_process_exit(struct perf_tool *tool,
763 					   union perf_event *event __maybe_unused,
764 					   struct perf_sample *sample,
765 					   struct evsel *evsel __maybe_unused,
766 					   struct machine *machine __maybe_unused)
767 {
768 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
769 	struct event_entry *ent;
770 
771 	list_for_each_entry(ent, &inject->samples, node) {
772 		if (sample->tid == ent->tid) {
773 			list_del_init(&ent->node);
774 			free(ent);
775 			break;
776 		}
777 	}
778 
779 	return 0;
780 }
781 
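/*
 * For --sched-stat: remember the most recent sched_switch event for each
 * thread. When a sched_stat_* event later arrives for that thread, the saved
 * switch event is re-emitted with the stat event's time and period (see
 * perf_inject__sched_stat()), attributing the cost to where the task was
 * switched out.
 */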
782 static int perf_inject__sched_switch(struct perf_tool *tool,
783 				     union perf_event *event,
784 				     struct perf_sample *sample,
785 				     struct evsel *evsel,
786 				     struct machine *machine)
787 {
788 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
789 	struct event_entry *ent;
790 
791 	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
792 
793 	ent = malloc(event->header.size + sizeof(struct event_entry));
794 	if (ent == NULL) {
795 		color_fprintf(stderr, PERF_COLOR_RED,
796 			     "Not enough memory to process sched switch event!");
797 		return -1;
798 	}
799 
800 	ent->tid = sample->tid;
801 	memcpy(&ent->event, event, event->header.size);
802 	list_add(&ent->node, &inject->samples);
803 	return 0;
804 }
805 
806 static int perf_inject__sched_stat(struct perf_tool *tool,
807 				   union perf_event *event __maybe_unused,
808 				   struct perf_sample *sample,
809 				   struct evsel *evsel,
810 				   struct machine *machine)
811 {
812 	struct event_entry *ent;
813 	union perf_event *event_sw;
814 	struct perf_sample sample_sw;
815 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
816 	u32 pid = evsel__intval(evsel, sample, "pid");
817 
818 	list_for_each_entry(ent, &inject->samples, node) {
819 		if (pid == ent->tid)
820 			goto found;
821 	}
822 
823 	return 0;
824 found:
825 	event_sw = &ent->event[0];
826 	evsel__parse_sample(evsel, event_sw, &sample_sw);
827 
828 	sample_sw.period = sample->period;
829 	sample_sw.time	 = sample->time;
830 	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
831 				      evsel->core.attr.read_format, &sample_sw);
832 	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
833 	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
834 }
835 
836 static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
837 {
838 	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
839 		return NULL;
840 	return &gs->vcpu[vcpu];
841 }
842 
843 static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
844 {
845 	ssize_t ret = writen(gs->tmp_fd, buf, sz);
846 
847 	return ret < 0 ? ret : 0;
848 }
849 
850 static int guest_session__repipe(struct perf_tool *tool,
851 				 union perf_event *event,
852 				 struct perf_sample *sample __maybe_unused,
853 				 struct machine *machine __maybe_unused)
854 {
855 	struct guest_session *gs = container_of(tool, struct guest_session, tool);
856 
857 	return guest_session__output_bytes(gs, event, event->header.size);
858 }
859 
860 static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
861 {
862 	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
863 	int hash;
864 
865 	if (!guest_tid)
866 		return -ENOMEM;
867 
868 	guest_tid->tid = tid;
869 	guest_tid->vcpu = vcpu;
870 	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
871 	hlist_add_head(&guest_tid->node, &gs->tids[hash]);
872 
873 	return 0;
874 }
875 
876 static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
877 				 union perf_event *event,
878 				 u64 offset __maybe_unused, void *data)
879 {
880 	struct guest_session *gs = data;
881 	unsigned int vcpu;
882 	struct guest_vcpu *guest_vcpu;
883 	int ret;
884 
885 	if (event->header.type != PERF_RECORD_COMM ||
886 	    event->comm.pid != gs->machine_pid)
887 		return 0;
888 
889 	/*
890 	 * The QEMU option "-name debug-threads=on" causes thread names to be
891 	 * formatted as below, although it is not an ABI. Also libvirt seems to
892 	 * use it by default. Here we rely on it to tell us which thread is which VCPU.
893 	 */
894 	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
895 	if (ret <= 0)
896 		return ret;
897 	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
898 		 event->comm.tid, event->comm.comm, vcpu);
899 	if (vcpu > INT_MAX) {
900 		pr_err("Invalid VCPU %u\n", vcpu);
901 		return -EINVAL;
902 	}
903 	guest_vcpu = guest_session__vcpu(gs, vcpu);
904 	if (!guest_vcpu)
905 		return -ENOMEM;
906 	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
907 		pr_err("Fatal error: Two threads found with the same VCPU\n");
908 		return -EINVAL;
909 	}
910 	guest_vcpu->tid = event->comm.tid;
911 
912 	return guest_session__map_tid(gs, event->comm.tid, vcpu);
913 }
914 
915 static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
916 {
917 	return perf_session__peek_events(session, session->header.data_offset,
918 					 session->header.data_size,
919 					 host_peek_vm_comms_cb, gs);
920 }
921 
922 static bool evlist__is_id_used(struct evlist *evlist, u64 id)
923 {
924 	return evlist__id2sid(evlist, id);
925 }
926 
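/* Pick a non-zero sample ID that is not already in use by the host session */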
927 static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
928 {
929 	do {
930 		gs->highest_id += 1;
931 	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
932 
933 	return gs->highest_id;
934 }
935 
936 static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
937 {
938 	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
939 	int hash;
940 
941 	if (!guest_id)
942 		return -ENOMEM;
943 
944 	guest_id->id = id;
945 	guest_id->host_id = host_id;
946 	guest_id->vcpu = vcpu;
947 	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
948 	hlist_add_head(&guest_id->node, &gs->heads[hash]);
949 
950 	return 0;
951 }
952 
953 static u64 evlist__find_highest_id(struct evlist *evlist)
954 {
955 	struct evsel *evsel;
956 	u64 highest_id = 1;
957 
958 	evlist__for_each_entry(evlist, evsel) {
959 		u32 j;
960 
961 		for (j = 0; j < evsel->core.ids; j++) {
962 			u64 id = evsel->core.id[j];
963 
964 			if (id > highest_id)
965 				highest_id = id;
966 		}
967 	}
968 
969 	return highest_id;
970 }
971 
972 static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
973 {
974 	struct evlist *evlist = gs->session->evlist;
975 	struct evsel *evsel;
976 	int ret;
977 
978 	evlist__for_each_entry(evlist, evsel) {
979 		u32 j;
980 
981 		for (j = 0; j < evsel->core.ids; j++) {
982 			struct perf_sample_id *sid;
983 			u64 host_id;
984 			u64 id;
985 
986 			id = evsel->core.id[j];
987 			sid = evlist__id2sid(evlist, id);
988 			if (!sid || sid->cpu.cpu == -1)
989 				continue;
990 			host_id = guest_session__allocate_new_id(gs, host_evlist);
991 			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
992 			if (ret)
993 				return ret;
994 		}
995 	}
996 
997 	return 0;
998 }
999 
1000 static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
1001 {
1002 	struct hlist_head *head;
1003 	struct guest_id *guest_id;
1004 	int hash;
1005 
1006 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
1007 	head = &gs->heads[hash];
1008 
1009 	hlist_for_each_entry(guest_id, head, node)
1010 		if (guest_id->id == id)
1011 			return guest_id;
1012 
1013 	return NULL;
1014 }
1015 
1016 static int process_attr(struct perf_tool *tool, union perf_event *event,
1017 			struct perf_sample *sample __maybe_unused,
1018 			struct machine *machine __maybe_unused)
1019 {
1020 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1021 
1022 	return perf_event__process_attr(tool, event, &inject->session->evlist);
1023 }
1024 
1025 static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
1026 {
1027 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1028 	struct perf_event_attr attr = evsel->core.attr;
1029 	u64 *id_array;
1030 	u32 *vcpu_array;
1031 	int ret = -ENOMEM;
1032 	u32 i;
1033 
1034 	id_array = calloc(evsel->core.ids, sizeof(*id_array));
1035 	if (!id_array)
1036 		return -ENOMEM;
1037 
1038 	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
1039 	if (!vcpu_array)
1040 		goto out;
1041 
1042 	for (i = 0; i < evsel->core.ids; i++) {
1043 		u64 id = evsel->core.id[i];
1044 		struct guest_id *guest_id = guest_session__lookup_id(gs, id);
1045 
1046 		if (!guest_id) {
1047 			pr_err("Failed to find guest id %"PRIu64"\n", id);
1048 			ret = -EINVAL;
1049 			goto out;
1050 		}
1051 		id_array[i] = guest_id->host_id;
1052 		vcpu_array[i] = guest_id->vcpu;
1053 	}
1054 
1055 	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
1056 	attr.exclude_host = 1;
1057 	attr.exclude_guest = 0;
1058 
1059 	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
1060 					  id_array, process_attr);
1061 	if (ret)
1062 		pr_err("Failed to add guest attr.\n");
1063 
1064 	for (i = 0; i < evsel->core.ids; i++) {
1065 		struct perf_sample_id *sid;
1066 		u32 vcpu = vcpu_array[i];
1067 
1068 		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
1069 		/* Guest event is per-thread from the host point of view */
1070 		sid->cpu.cpu = -1;
1071 		sid->tid = gs->vcpu[vcpu].tid;
1072 		sid->machine_pid = gs->machine_pid;
1073 		sid->vcpu.cpu = vcpu;
1074 	}
1075 out:
1076 	free(vcpu_array);
1077 	free(id_array);
1078 	return ret;
1079 }
1080 
1081 static int guest_session__add_attrs(struct guest_session *gs)
1082 {
1083 	struct evlist *evlist = gs->session->evlist;
1084 	struct evsel *evsel;
1085 	int ret;
1086 
1087 	evlist__for_each_entry(evlist, evsel) {
1088 		ret = guest_session__add_attr(gs, evsel);
1089 		if (ret)
1090 			return ret;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
1097 {
1098 	struct perf_session *session = inject->session;
1099 	struct evlist *evlist = session->evlist;
1100 	struct machine *machine = &session->machines.host;
1101 	size_t from = evlist->core.nr_entries - new_cnt;
1102 
1103 	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
1104 						 evlist, machine, from);
1105 }
1106 
1107 static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
1108 {
1109 	struct hlist_head *head;
1110 	struct guest_tid *guest_tid;
1111 	int hash;
1112 
1113 	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
1114 	head = &gs->tids[hash];
1115 
1116 	hlist_for_each_entry(guest_tid, head, node)
1117 		if (guest_tid->tid == tid)
1118 			return guest_tid;
1119 
1120 	return NULL;
1121 }
1122 
1123 static bool dso__is_in_kernel_space(struct dso *dso)
1124 {
1125 	if (dso__is_vdso(dso))
1126 		return false;
1127 
1128 	return dso__is_kcore(dso) ||
1129 	       dso->kernel ||
1130 	       is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
1131 }
1132 
1133 static u64 evlist__first_id(struct evlist *evlist)
1134 {
1135 	struct evsel *evsel;
1136 
1137 	evlist__for_each_entry(evlist, evsel) {
1138 		if (evsel->core.ids)
1139 			return evsel->core.id[0];
1140 	}
1141 	return 0;
1142 }
1143 
1144 static int process_build_id(struct perf_tool *tool,
1145 			    union perf_event *event,
1146 			    struct perf_sample *sample __maybe_unused,
1147 			    struct machine *machine __maybe_unused)
1148 {
1149 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1150 
1151 	return perf_event__process_build_id(inject->session, event);
1152 }
1153 
1154 static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
1155 {
1156 	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
1157 	u8 cpumode = dso__is_in_kernel_space(dso) ?
1158 			PERF_RECORD_MISC_GUEST_KERNEL :
1159 			PERF_RECORD_MISC_GUEST_USER;
1160 
1161 	if (!machine)
1162 		return -ENOMEM;
1163 
1164 	dso->hit = 1;
1165 
1166 	return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
1167 					       process_build_id, machine);
1168 }
1169 
1170 static int guest_session__add_build_ids(struct guest_session *gs)
1171 {
1172 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1173 	struct machine *machine = &gs->session->machines.host;
1174 	struct dso *dso;
1175 	int ret;
1176 
1177 	/* Build IDs will be put in the Build ID feature section */
1178 	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
1179 
1180 	dsos__for_each_with_build_id(dso, &machine->dsos.head) {
1181 		ret = synthesize_build_id(inject, dso, gs->machine_pid);
1182 		if (ret)
1183 			return ret;
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 static int guest_session__ksymbol_event(struct perf_tool *tool,
1190 					union perf_event *event,
1191 					struct perf_sample *sample __maybe_unused,
1192 					struct machine *machine __maybe_unused)
1193 {
1194 	struct guest_session *gs = container_of(tool, struct guest_session, tool);
1195 
1196 	/* Only support out-of-line i.e. no BPF support */
1197 	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
1198 		return 0;
1199 
1200 	return guest_session__output_bytes(gs, event, event->header.size);
1201 }
1202 
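/*
 * Process the guest perf.data file. Only the event types hooked up below are
 * kept; they are written to a temporary file from which they are later
 * injected into the host event stream.
 */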
1203 static int guest_session__start(struct guest_session *gs, const char *name, bool force)
1204 {
1205 	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
1206 	struct perf_session *session;
1207 	int ret;
1208 
1209 	/* Only these events will be injected */
1210 	gs->tool.mmap		= guest_session__repipe;
1211 	gs->tool.mmap2		= guest_session__repipe;
1212 	gs->tool.comm		= guest_session__repipe;
1213 	gs->tool.fork		= guest_session__repipe;
1214 	gs->tool.exit		= guest_session__repipe;
1215 	gs->tool.lost		= guest_session__repipe;
1216 	gs->tool.context_switch	= guest_session__repipe;
1217 	gs->tool.ksymbol	= guest_session__ksymbol_event;
1218 	gs->tool.text_poke	= guest_session__repipe;
1219 	/*
1220 	 * Processing a build ID creates a struct dso with that build ID. Later,
1221 	 * all guest dsos are iterated and the build IDs processed into the host
1222 	 * session where they will be output to the Build ID feature section
1223 	 * when the perf.data file header is written.
1224 	 */
1225 	gs->tool.build_id	= perf_event__process_build_id;
1226 	/* Process the id index to know what VCPU an ID belongs to */
1227 	gs->tool.id_index	= perf_event__process_id_index;
1228 
1229 	gs->tool.ordered_events	= true;
1230 	gs->tool.ordering_requires_timestamps = true;
1231 
1232 	gs->data.path	= name;
1233 	gs->data.force	= force;
1234 	gs->data.mode	= PERF_DATA_MODE_READ;
1235 
1236 	session = perf_session__new(&gs->data, &gs->tool);
1237 	if (IS_ERR(session))
1238 		return PTR_ERR(session);
1239 	gs->session = session;
1240 
1241 	/*
1242 	 * Initial events have zero'd ID samples. Get default ID sample size
1243 	 * used for removing them.
1244 	 */
1245 	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
1246 	/* And default ID for adding back a host-compatible ID sample */
1247 	gs->dflt_id = evlist__first_id(session->evlist);
1248 	if (!gs->dflt_id) {
1249 		pr_err("Guest data has no sample IDs\n");
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Temporary file for guest events */
1254 	gs->tmp_file_name = strdup(tmp_file_name);
1255 	if (!gs->tmp_file_name)
1256 		return -ENOMEM;
1257 	gs->tmp_fd = mkstemp(gs->tmp_file_name);
1258 	if (gs->tmp_fd < 0)
1259 		return -errno;
1260 
1261 	if (zstd_init(&gs->session->zstd_data, 0) < 0)
1262 		pr_warning("Guest session decompression initialization failed.\n");
1263 
1264 	/*
1265 	 * perf does not support processing 2 sessions simultaneously, so output
1266 	 * guest events to a temporary file.
1267 	 */
1268 	ret = perf_session__process_events(gs->session);
1269 	if (ret)
1270 		return ret;
1271 
1272 	if (lseek(gs->tmp_fd, 0, SEEK_SET))
1273 		return -errno;
1274 
1275 	return 0;
1276 }
1277 
1278 /* Free hlist nodes assuming hlist_node is the first member of hlist entries */
1279 static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
1280 {
1281 	struct hlist_node *pos, *n;
1282 	size_t i;
1283 
1284 	for (i = 0; i < hlist_sz; ++i) {
1285 		hlist_for_each_safe(pos, n, &heads[i]) {
1286 			hlist_del(pos);
1287 			free(pos);
1288 		}
1289 	}
1290 }
1291 
1292 static void guest_session__exit(struct guest_session *gs)
1293 {
1294 	if (gs->session) {
1295 		perf_session__delete(gs->session);
1296 		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
1297 		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
1298 	}
1299 	if (gs->tmp_file_name) {
1300 		if (gs->tmp_fd >= 0)
1301 			close(gs->tmp_fd);
1302 		unlink(gs->tmp_file_name);
1303 		free(gs->tmp_file_name);
1304 	}
1305 	free(gs->vcpu);
1306 	free(gs->perf_data_file);
1307 }
1308 
1309 static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
1310 {
1311 	tc->time_shift		= time_conv->time_shift;
1312 	tc->time_mult		= time_conv->time_mult;
1313 	tc->time_zero		= time_conv->time_zero;
1314 	tc->time_cycles		= time_conv->time_cycles;
1315 	tc->time_mask		= time_conv->time_mask;
1316 	tc->cap_user_time_zero	= time_conv->cap_user_time_zero;
1317 	tc->cap_user_time_short	= time_conv->cap_user_time_short;
1318 }
1319 
1320 static void guest_session__get_tc(struct guest_session *gs)
1321 {
1322 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1323 
1324 	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
1325 	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
1326 }
1327 
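/*
 * Convert a guest perf time to host perf time:
 *   guest time -> guest TSC (if the guest has TSC conversion info)
 *   host TSC = (guest TSC - time_offset) / time_scale
 *   host TSC -> host perf time (if the host has TSC conversion info)
 * time_offset and time_scale come from the --guest-data option.
 */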
1328 static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
1329 {
1330 	u64 tsc;
1331 
1332 	if (!guest_time) {
1333 		*host_time = 0;
1334 		return;
1335 	}
1336 
1337 	if (gs->guest_tc.cap_user_time_zero)
1338 		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
1339 	else
1340 		tsc = guest_time;
1341 
1342 	/*
1343 	 * This is the correct order of operations for x86 if the TSC Offset and
1344 	 * Multiplier values are used.
1345 	 */
1346 	tsc -= gs->time_offset;
1347 	tsc /= gs->time_scale;
1348 
1349 	if (gs->host_tc.cap_user_time_zero)
1350 		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
1351 	else
1352 		*host_time = tsc;
1353 }
1354 
1355 static int guest_session__fetch(struct guest_session *gs)
1356 {
1357 	void *buf = gs->ev.event_buf;
1358 	struct perf_event_header *hdr = buf;
1359 	size_t hdr_sz = sizeof(*hdr);
1360 	ssize_t ret;
1361 
1362 	ret = readn(gs->tmp_fd, buf, hdr_sz);
1363 	if (ret < 0)
1364 		return ret;
1365 
1366 	if (!ret) {
1367 		/* Zero size means EOF */
1368 		hdr->size = 0;
1369 		return 0;
1370 	}
1371 
1372 	buf += hdr_sz;
1373 
1374 	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
1375 	if (ret < 0)
1376 		return ret;
1377 
1378 	gs->ev.event = (union perf_event *)gs->ev.event_buf;
1379 	gs->ev.sample.time = 0;
1380 
1381 	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
1382 		pr_err("Unexpected type fetching guest event\n");
1383 		return 0;
1384 	}
1385 
1386 	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
1387 	if (ret) {
1388 		pr_err("Parse failed fetching guest event\n");
1389 		return ret;
1390 	}
1391 
1392 	if (!gs->have_tc) {
1393 		guest_session__get_tc(gs);
1394 		gs->have_tc = true;
1395 	}
1396 
1397 	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
1398 
1399 	return 0;
1400 }
1401 
1402 static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
1403 				    const struct perf_sample *sample)
1404 {
1405 	struct evsel *evsel;
1406 	void *array;
1407 	int ret;
1408 
1409 	evsel = evlist__id2evsel(evlist, sample->id);
1410 	array = ev;
1411 
1412 	if (!evsel) {
1413 		pr_err("No evsel for id %"PRIu64"\n", sample->id);
1414 		return -EINVAL;
1415 	}
1416 
1417 	array += ev->header.size;
1418 	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
1419 	if (ret < 0)
1420 		return ret;
1421 
1422 	if (ret & 7) {
1423 		pr_err("Bad id sample size %d\n", ret);
1424 		return -EINVAL;
1425 	}
1426 
1427 	ev->header.size += ret;
1428 
1429 	return 0;
1430 }
1431 
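/*
 * Inject guest events with timestamps up to and including 'timestamp'. For
 * each event: switch the cpumode to its guest equivalent, strip the guest ID
 * sample, substitute the mapped host sample ID, map the guest VCPU to the
 * host CPU it was last seen running on, and append a new host-compatible ID
 * sample.
 */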
1432 static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
1433 {
1434 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1435 	int ret;
1436 
1437 	if (!gs->ready)
1438 		return 0;
1439 
1440 	while (1) {
1441 		struct perf_sample *sample;
1442 		struct guest_id *guest_id;
1443 		union perf_event *ev;
1444 		u16 id_hdr_size;
1445 		u8 cpumode;
1446 		u64 id;
1447 
1448 		if (!gs->fetched) {
1449 			ret = guest_session__fetch(gs);
1450 			if (ret)
1451 				return ret;
1452 			gs->fetched = true;
1453 		}
1454 
1455 		ev = gs->ev.event;
1456 		sample = &gs->ev.sample;
1457 
1458 		if (!ev->header.size)
1459 			return 0; /* EOF */
1460 
1461 		if (sample->time > timestamp)
1462 			return 0;
1463 
1464 		/* Change cpumode to guest */
1465 		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1466 		if (cpumode & PERF_RECORD_MISC_USER)
1467 			cpumode = PERF_RECORD_MISC_GUEST_USER;
1468 		else
1469 			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1470 		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
1471 		ev->header.misc |= cpumode;
1472 
1473 		id = sample->id;
1474 		if (!id) {
1475 			id = gs->dflt_id;
1476 			id_hdr_size = gs->dflt_id_hdr_size;
1477 		} else {
1478 			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
1479 
1480 			id_hdr_size = evsel__id_hdr_size(evsel);
1481 		}
1482 
1483 		if (id_hdr_size & 7) {
1484 			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
1485 			return -EINVAL;
1486 		}
1487 
1488 		if (ev->header.size & 7) {
1489 			pr_err("Bad event size %u\n", ev->header.size);
1490 			return -EINVAL;
1491 		}
1492 
1493 		/* Remove guest id sample */
1494 		ev->header.size -= id_hdr_size;
1495 
1496 		if (ev->header.size & 7) {
1497 			pr_err("Bad raw event size %u\n", ev->header.size);
1498 			return -EINVAL;
1499 		}
1500 
1501 		guest_id = guest_session__lookup_id(gs, id);
1502 		if (!guest_id) {
1503 			pr_err("Guest event with unknown id %llu\n",
1504 			       (unsigned long long)id);
1505 			return -EINVAL;
1506 		}
1507 
1508 		/* Change to host ID to avoid conflicting ID values */
1509 		sample->id = guest_id->host_id;
1510 		sample->stream_id = guest_id->host_id;
1511 
1512 		if (sample->cpu != (u32)-1) {
1513 			if (sample->cpu >= gs->vcpu_cnt) {
1514 				pr_err("Guest event with unknown VCPU %u\n",
1515 				       sample->cpu);
1516 				return -EINVAL;
1517 			}
1518 			/* Change to host CPU instead of guest VCPU */
1519 			sample->cpu = gs->vcpu[sample->cpu].cpu;
1520 		}
1521 
1522 		/* New id sample with new ID and CPU */
1523 		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
1524 		if (ret)
1525 			return ret;
1526 
1527 		if (ev->header.size & 7) {
1528 			pr_err("Bad new event size %u\n", ev->header.size);
1529 			return -EINVAL;
1530 		}
1531 
1532 		gs->fetched = false;
1533 
1534 		ret = output_bytes(inject, ev, ev->header.size);
1535 		if (ret)
1536 			return ret;
1537 	}
1538 }
1539 
1540 static int guest_session__flush_events(struct guest_session *gs)
1541 {
1542 	return guest_session__inject_events(gs, -1);
1543 }
1544 
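/*
 * Before repiping a host event, inject any pending guest events up to the
 * host event's timestamp so that the output remains in time order.
 */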
1545 static int host__repipe(struct perf_tool *tool,
1546 			union perf_event *event,
1547 			struct perf_sample *sample,
1548 			struct machine *machine)
1549 {
1550 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1551 	int ret;
1552 
1553 	ret = guest_session__inject_events(&inject->guest_session, sample->time);
1554 	if (ret)
1555 		return ret;
1556 
1557 	return perf_event__repipe(tool, event, sample, machine);
1558 }
1559 
1560 static int host__finished_init(struct perf_session *session, union perf_event *event)
1561 {
1562 	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
1563 	struct guest_session *gs = &inject->guest_session;
1564 	int ret;
1565 
1566 	/*
1567 	 * Peek through host COMM events to find QEMU threads and the VCPU they
1568 	 * are running.
1569 	 */
1570 	ret = host_peek_vm_comms(session, gs);
1571 	if (ret)
1572 		return ret;
1573 
1574 	if (!gs->vcpu_cnt) {
1575 		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
1576 		return -EINVAL;
1577 	}
1578 
1579 	/*
1580 	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
1581 	 */
1582 	gs->highest_id = evlist__find_highest_id(session->evlist);
1583 	ret = guest_session__map_ids(gs, session->evlist);
1584 	if (ret)
1585 		return ret;
1586 
1587 	ret = guest_session__add_attrs(gs);
1588 	if (ret)
1589 		return ret;
1590 
1591 	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
1592 	if (ret) {
1593 		pr_err("Failed to synthesize id_index\n");
1594 		return ret;
1595 	}
1596 
1597 	ret = guest_session__add_build_ids(gs);
1598 	if (ret) {
1599 		pr_err("Failed to add guest build IDs\n");
1600 		return ret;
1601 	}
1602 
1603 	gs->ready = true;
1604 
1605 	ret = guest_session__inject_events(gs, 0);
1606 	if (ret)
1607 		return ret;
1608 
1609 	return perf_event__repipe_op2_synth(session, event);
1610 }
1611 
1612 /*
1613  * Obey finished-round ordering. The FINISHED_ROUND event is first processed
1614  * which flushes host events to file up until the last flush time. Then inject
1615  * guest events up to the same time. Finally write out the FINISHED_ROUND event
1616  * itself.
1617  */
1618 static int host__finished_round(struct perf_tool *tool,
1619 				union perf_event *event,
1620 				struct ordered_events *oe)
1621 {
1622 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1623 	int ret = perf_event__process_finished_round(tool, event, oe);
1624 	u64 timestamp = ordered_events__last_flush_time(oe);
1625 
1626 	if (ret)
1627 		return ret;
1628 
1629 	ret = guest_session__inject_events(&inject->guest_session, timestamp);
1630 	if (ret)
1631 		return ret;
1632 
1633 	return perf_event__repipe_oe_synth(tool, event, oe);
1634 }
1635 
1636 static int host__context_switch(struct perf_tool *tool,
1637 				union perf_event *event,
1638 				struct perf_sample *sample,
1639 				struct machine *machine)
1640 {
1641 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1642 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1643 	struct guest_session *gs = &inject->guest_session;
1644 	u32 pid = event->context_switch.next_prev_pid;
1645 	u32 tid = event->context_switch.next_prev_tid;
1646 	struct guest_tid *guest_tid;
1647 	u32 vcpu;
1648 
1649 	if (out || pid != gs->machine_pid)
1650 		goto out;
1651 
1652 	guest_tid = guest_session__lookup_tid(gs, tid);
1653 	if (!guest_tid)
1654 		goto out;
1655 
1656 	if (sample->cpu == (u32)-1) {
1657 		pr_err("Switch event does not have CPU\n");
1658 		return -EINVAL;
1659 	}
1660 
1661 	vcpu = guest_tid->vcpu;
1662 	if (vcpu >= gs->vcpu_cnt)
1663 		return -EINVAL;
1664 
1665 	/* Guest is switching in, record which CPU the VCPU is now running on */
1666 	gs->vcpu[vcpu].cpu = sample->cpu;
1667 out:
1668 	return host__repipe(tool, event, sample, machine);
1669 }
1670 
1671 static void sig_handler(int sig __maybe_unused)
1672 {
1673 	session_done = 1;
1674 }
1675 
1676 static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
1677 {
1678 	struct perf_event_attr *attr = &evsel->core.attr;
1679 	const char *name = evsel__name(evsel);
1680 
1681 	if (!(attr->sample_type & sample_type)) {
1682 		pr_err("Samples for %s event do not have %s attribute set.\n",
1683 			name, sample_msg);
1684 		return -EINVAL;
1685 	}
1686 
1687 	return 0;
1688 }
1689 
1690 static int drop_sample(struct perf_tool *tool __maybe_unused,
1691 		       union perf_event *event __maybe_unused,
1692 		       struct perf_sample *sample __maybe_unused,
1693 		       struct evsel *evsel __maybe_unused,
1694 		       struct machine *machine __maybe_unused)
1695 {
1696 	return 0;
1697 }
1698 
1699 static void strip_init(struct perf_inject *inject)
1700 {
1701 	struct evlist *evlist = inject->session->evlist;
1702 	struct evsel *evsel;
1703 
1704 	inject->tool.context_switch = perf_event__drop;
1705 
1706 	evlist__for_each_entry(evlist, evsel)
1707 		evsel->handler = drop_sample;
1708 }
1709 
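/*
 * Parse the --vm-time-correlation argument. The string may begin with
 * "dry-run"; whatever follows is passed to the instruction trace decoder as
 * time correlation options.
 */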
1710 static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
1711 {
1712 	struct perf_inject *inject = opt->value;
1713 	const char *args;
1714 	char *dry_run;
1715 
1716 	if (unset)
1717 		return 0;
1718 
1719 	inject->itrace_synth_opts.set = true;
1720 	inject->itrace_synth_opts.vm_time_correlation = true;
1721 	inject->in_place_update = true;
1722 
1723 	if (!str)
1724 		return 0;
1725 
1726 	dry_run = skip_spaces(str);
1727 	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
1728 		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
1729 		inject->in_place_update_dry_run = true;
1730 		args = dry_run + strlen("dry-run");
1731 	} else {
1732 		args = str;
1733 	}
1734 
1735 	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
1736 
1737 	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
1738 }
1739 
1740 static int parse_guest_data(const struct option *opt, const char *str, int unset)
1741 {
1742 	struct perf_inject *inject = opt->value;
1743 	struct guest_session *gs = &inject->guest_session;
1744 	char *tok;
1745 	char *s;
1746 
1747 	if (unset)
1748 		return 0;
1749 
1750 	if (!str)
1751 		goto bad_args;
1752 
1753 	s = strdup(str);
1754 	if (!s)
1755 		return -ENOMEM;
1756 
1757 	gs->perf_data_file = strsep(&s, ",");
1758 	if (!gs->perf_data_file)
1759 		goto bad_args;
1760 
1761 	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
1762 	if (gs->copy_kcore_dir)
1763 		inject->output.is_dir = true;
1764 
1765 	tok = strsep(&s, ",");
1766 	if (!tok)
1767 		goto bad_args;
1768 	gs->machine_pid = strtoul(tok, NULL, 0);
1769 	if (!inject->guest_session.machine_pid)
1770 		goto bad_args;
1771 
1772 	gs->time_scale = 1;
1773 
1774 	tok = strsep(&s, ",");
1775 	if (!tok)
1776 		goto out;
1777 	gs->time_offset = strtoull(tok, NULL, 0);
1778 
1779 	tok = strsep(&s, ",");
1780 	if (!tok)
1781 		goto out;
1782 	gs->time_scale = strtod(tok, NULL);
1783 	if (!gs->time_scale)
1784 		goto bad_args;
1785 out:
1786 	return 0;
1787 
1788 bad_args:
1789 	pr_err("--guest-data option requires guest perf.data file name, "
1790 	       "guest machine PID, and optionally guest timestamp offset, "
1791 	       "and guest timestamp scale factor, separated by commas.\n");
1792 	return -1;
1793 }
1794 
1795 static int save_section_info_cb(struct perf_file_section *section,
1796 				struct perf_header *ph __maybe_unused,
1797 				int feat, int fd __maybe_unused, void *data)
1798 {
1799 	struct perf_inject *inject = data;
1800 
1801 	inject->secs[feat] = *section;
1802 	return 0;
1803 }
1804 
1805 static int save_section_info(struct perf_inject *inject)
1806 {
1807 	struct perf_header *header = &inject->session->header;
1808 	int fd = perf_data__fd(inject->session->data);
1809 
1810 	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
1811 }
1812 
1813 static bool keep_feat(int feat)
1814 {
1815 	switch (feat) {
1816 	/* Keep original information that describes the machine or software */
1817 	case HEADER_TRACING_DATA:
1818 	case HEADER_HOSTNAME:
1819 	case HEADER_OSRELEASE:
1820 	case HEADER_VERSION:
1821 	case HEADER_ARCH:
1822 	case HEADER_NRCPUS:
1823 	case HEADER_CPUDESC:
1824 	case HEADER_CPUID:
1825 	case HEADER_TOTAL_MEM:
1826 	case HEADER_CPU_TOPOLOGY:
1827 	case HEADER_NUMA_TOPOLOGY:
1828 	case HEADER_PMU_MAPPINGS:
1829 	case HEADER_CACHE:
1830 	case HEADER_MEM_TOPOLOGY:
1831 	case HEADER_CLOCKID:
1832 	case HEADER_BPF_PROG_INFO:
1833 	case HEADER_BPF_BTF:
1834 	case HEADER_CPU_PMU_CAPS:
1835 	case HEADER_CLOCK_DATA:
1836 	case HEADER_HYBRID_TOPOLOGY:
1837 	case HEADER_PMU_CAPS:
1838 		return true;
1839 	/* Information that can be updated */
1840 	case HEADER_BUILD_ID:
1841 	case HEADER_CMDLINE:
1842 	case HEADER_EVENT_DESC:
1843 	case HEADER_BRANCH_STACK:
1844 	case HEADER_GROUP_DESC:
1845 	case HEADER_AUXTRACE:
1846 	case HEADER_STAT:
1847 	case HEADER_SAMPLE_TIME:
1848 	case HEADER_DIR_FORMAT:
1849 	case HEADER_COMPRESSED:
1850 	default:
1851 		return false;
1852 	}
1853 }
1854 
1855 static int read_file(int fd, u64 offs, void *buf, size_t sz)
1856 {
1857 	ssize_t ret = preadn(fd, buf, sz, offs);
1858 
1859 	if (ret < 0)
1860 		return -errno;
1861 	if ((size_t)ret != sz)
1862 		return -EINVAL;
1863 	return 0;
1864 }
1865 
1866 static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
1867 {
1868 	int fd = perf_data__fd(inject->session->data);
1869 	u64 offs = inject->secs[feat].offset;
1870 	size_t sz = inject->secs[feat].size;
1871 	void *buf = malloc(sz);
1872 	int ret;
1873 
1874 	if (!buf)
1875 		return -ENOMEM;
1876 
1877 	ret = read_file(fd, offs, buf, sz);
1878 	if (ret)
1879 		goto out_free;
1880 
1881 	ret = fw->write(fw, buf, sz);
1882 out_free:
1883 	free(buf);
1884 	return ret;
1885 }
1886 
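/*
 * Glue for copying feature sections when the output file header is written:
 * sections that keep_feat() classifies as describing the machine or software
 * are copied verbatim from the input file; the rest are left to be rewritten.
 */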
1887 struct inject_fc {
1888 	struct feat_copier fc;
1889 	struct perf_inject *inject;
1890 };
1891 
1892 static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
1893 {
1894 	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
1895 	struct perf_inject *inject = inj_fc->inject;
1896 	int ret;
1897 
1898 	if (!inject->secs[feat].offset ||
1899 	    !keep_feat(feat))
1900 		return 0;
1901 
1902 	ret = feat_copy(inject, feat, fw);
1903 	if (ret < 0)
1904 		return ret;
1905 
1906 	return 1; /* Feature section copied */
1907 }
1908 
1909 static int copy_kcore_dir(struct perf_inject *inject)
1910 {
1911 	char *cmd;
1912 	int ret;
1913 
1914 	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
1915 		       inject->input_name, inject->output.path);
1916 	if (ret < 0)
1917 		return ret;
1918 	pr_debug("%s\n", cmd);
1919 	ret = system(cmd);
1920 	free(cmd);
1921 	return ret;
1922 }
1923 
1924 static int guest_session__copy_kcore_dir(struct guest_session *gs)
1925 {
1926 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1927 	char *cmd;
1928 	int ret;
1929 
1930 	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
1931 		       gs->perf_data_file, inject->output.path, gs->machine_pid);
1932 	if (ret < 0)
1933 		return ret;
1934 	pr_debug("%s\n", cmd);
1935 	ret = system(cmd);
1936 	free(cmd);
1937 	return ret;
1938 }
1939 
1940 static int output_fd(struct perf_inject *inject)
1941 {
1942 	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
1943 }
1944 
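/*
 * Set up the tool callbacks according to the requested mode (build-id
 * injection, sched_stat correlation, VM time correlation, instruction trace
 * decoding, or guest data injection), then run injection over the input
 * session.
 */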
1945 static int __cmd_inject(struct perf_inject *inject)
1946 {
1947 	int ret = -EINVAL;
1948 	struct guest_session *gs = &inject->guest_session;
1949 	struct perf_session *session = inject->session;
1950 	int fd = output_fd(inject);
1951 	u64 output_data_offset;
1952 
1953 	signal(SIGINT, sig_handler);
1954 
1955 	if (inject->build_ids || inject->sched_stat ||
1956 	    inject->itrace_synth_opts.set || inject->build_id_all) {
1957 		inject->tool.mmap	  = perf_event__repipe_mmap;
1958 		inject->tool.mmap2	  = perf_event__repipe_mmap2;
1959 		inject->tool.fork	  = perf_event__repipe_fork;
1960 		inject->tool.tracing_data = perf_event__repipe_tracing_data;
1961 	}
1962 
1963 	output_data_offset = perf_session__data_offset(session->evlist);
1964 
1965 	if (inject->build_id_all) {
1966 		inject->tool.mmap	  = perf_event__repipe_buildid_mmap;
1967 		inject->tool.mmap2	  = perf_event__repipe_buildid_mmap2;
1968 	} else if (inject->build_ids) {
1969 		inject->tool.sample = perf_event__inject_buildid;
1970 	} else if (inject->sched_stat) {
1971 		struct evsel *evsel;
1972 
1973 		evlist__for_each_entry(session->evlist, evsel) {
1974 			const char *name = evsel__name(evsel);
1975 
1976 			if (!strcmp(name, "sched:sched_switch")) {
1977 				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
1978 					return -EINVAL;
1979 
1980 				evsel->handler = perf_inject__sched_switch;
1981 			} else if (!strcmp(name, "sched:sched_process_exit"))
1982 				evsel->handler = perf_inject__sched_process_exit;
1983 			else if (!strncmp(name, "sched:sched_stat_", 17))
1984 				evsel->handler = perf_inject__sched_stat;
1985 		}
1986 	} else if (inject->itrace_synth_opts.vm_time_correlation) {
1987 		session->itrace_synth_opts = &inject->itrace_synth_opts;
1988 		memset(&inject->tool, 0, sizeof(inject->tool));
1989 		inject->tool.id_index	    = perf_event__process_id_index;
1990 		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
1991 		inject->tool.auxtrace	    = perf_event__process_auxtrace;
1992 		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
1993 		inject->tool.ordered_events = true;
1994 		inject->tool.ordering_requires_timestamps = true;
1995 	} else if (inject->itrace_synth_opts.set) {
1996 		session->itrace_synth_opts = &inject->itrace_synth_opts;
1997 		inject->itrace_synth_opts.inject = true;
1998 		inject->tool.comm	    = perf_event__repipe_comm;
1999 		inject->tool.namespaces	    = perf_event__repipe_namespaces;
2000 		inject->tool.exit	    = perf_event__repipe_exit;
2001 		inject->tool.id_index	    = perf_event__process_id_index;
2002 		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
2003 		inject->tool.auxtrace	    = perf_event__process_auxtrace;
2004 		inject->tool.aux	    = perf_event__drop_aux;
2005 		inject->tool.itrace_start   = perf_event__drop_aux;
2006 		inject->tool.aux_output_hw_id = perf_event__drop_aux;
2007 		inject->tool.ordered_events = true;
2008 		inject->tool.ordering_requires_timestamps = true;
2009 		/* Allow space in the header for new attributes */
2010 		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
2011 		if (inject->strip)
2012 			strip_init(inject);
2013 	} else if (gs->perf_data_file) {
2014 		char *name = gs->perf_data_file;
2015 
2016 		/*
2017 		 * Not strictly necessary, but keep these events in order wrt
2018 		 * guest events.
2019 		 */
2020 		inject->tool.mmap		= host__repipe;
2021 		inject->tool.mmap2		= host__repipe;
2022 		inject->tool.comm		= host__repipe;
2023 		inject->tool.fork		= host__repipe;
2024 		inject->tool.exit		= host__repipe;
2025 		inject->tool.lost		= host__repipe;
2026 		inject->tool.context_switch	= host__repipe;
2027 		inject->tool.ksymbol		= host__repipe;
2028 		inject->tool.text_poke		= host__repipe;
2029 		/*
2030 		 * Once the host session has initialized, set up sample ID
2031 		 * mapping and feed in guest attrs, build IDs and initial
2032 		 * events.
2033 		 */
2034 		inject->tool.finished_init	= host__finished_init;
2035 		/* Obey finished round ordering */
2036 		inject->tool.finished_round	= host__finished_round;
2037 		/* Keep track of which CPU a VCPU is running on */
2038 		inject->tool.context_switch	= host__context_switch;
2039 		/*
2040 		 * Must order events to be able to obey finished round
2041 		 * ordering.
2042 		 */
2043 		inject->tool.ordered_events	= true;
2044 		inject->tool.ordering_requires_timestamps = true;
2045 		/* Set up a separate session to process guest perf.data file */
2046 		ret = guest_session__start(gs, name, session->data->force);
2047 		if (ret) {
2048 			pr_err("Failed to process %s, error %d\n", name, ret);
2049 			return ret;
2050 		}
2051 		/* Allow space in the header for guest attributes */
2052 		output_data_offset += gs->session->header.data_offset;
2053 		output_data_offset = roundup(output_data_offset, 4096);
2054 	}
2055 
2056 	if (!inject->itrace_synth_opts.set)
2057 		auxtrace_index__free(&session->auxtrace_index);
2058 
2059 	if (!inject->is_pipe && !inject->in_place_update)
2060 		lseek(fd, output_data_offset, SEEK_SET);
2061 
2062 	ret = perf_session__process_events(session);
2063 	if (ret)
2064 		return ret;
2065 
2066 	if (gs->session) {
2067 		/*
2068 		 * Remaining guest events have later timestamps. Flush them
2069 		 * out to file.
2070 		 */
2071 		ret = guest_session__flush_events(gs);
2072 		if (ret) {
2073 			pr_err("Failed to flush guest events\n");
2074 			return ret;
2075 		}
2076 	}
2077 
2078 	if (!inject->is_pipe && !inject->in_place_update) {
2079 		struct inject_fc inj_fc = {
2080 			.fc.copy = feat_copy_cb,
2081 			.inject = inject,
2082 		};
2083 
2084 		if (inject->build_ids)
2085 			perf_header__set_feat(&session->header,
2086 					      HEADER_BUILD_ID);
2087 		/*
2088 		 * Keep all buildids when there is unprocessed AUX data because
2089 		 * it is not known which ones the AUX trace hits.
2090 		 */
2091 		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
2092 		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
2093 			dsos__hit_all(session);
2094 		/*
2095 		 * The AUX areas have been removed and replaced with
2096 		 * synthesized hardware events, so clear the feature flag.
2097 		 */
2098 		if (inject->itrace_synth_opts.set) {
2099 			perf_header__clear_feat(&session->header,
2100 						HEADER_AUXTRACE);
2101 			if (inject->itrace_synth_opts.last_branch ||
2102 			    inject->itrace_synth_opts.add_last_branch)
2103 				perf_header__set_feat(&session->header,
2104 						      HEADER_BRANCH_STACK);
2105 		}
2106 		session->header.data_offset = output_data_offset;
2107 		session->header.data_size = inject->bytes_written;
2108 		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
2109 
2110 		if (inject->copy_kcore_dir) {
2111 			ret = copy_kcore_dir(inject);
2112 			if (ret) {
2113 				pr_err("Failed to copy kcore\n");
2114 				return ret;
2115 			}
2116 		}
2117 		if (gs->copy_kcore_dir) {
2118 			ret = guest_session__copy_kcore_dir(gs);
2119 			if (ret) {
2120 				pr_err("Failed to copy guest kcore\n");
2121 				return ret;
2122 			}
2123 		}
2124 	}
2125 
2126 	return ret;
2127 }
2128 
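/*
 * Entry point for 'perf inject'.  By default every event is repiped
 * unchanged from input to output; the options below swap in specialized
 * handlers, e.g. "perf inject -b -i perf.data -o perf.data.new" to add
 * build-id events, or "--itrace" to synthesize events from an AUX trace.
 */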
2129 int cmd_inject(int argc, const char **argv)
2130 {
2131 	struct perf_inject inject = {
2132 		.tool = {
2133 			.sample		= perf_event__repipe_sample,
2134 			.read		= perf_event__repipe_sample,
2135 			.mmap		= perf_event__repipe,
2136 			.mmap2		= perf_event__repipe,
2137 			.comm		= perf_event__repipe,
2138 			.namespaces	= perf_event__repipe,
2139 			.cgroup		= perf_event__repipe,
2140 			.fork		= perf_event__repipe,
2141 			.exit		= perf_event__repipe,
2142 			.lost		= perf_event__repipe,
2143 			.lost_samples	= perf_event__repipe,
2144 			.aux		= perf_event__repipe,
2145 			.itrace_start	= perf_event__repipe,
2146 			.aux_output_hw_id = perf_event__repipe,
2147 			.context_switch	= perf_event__repipe,
2148 			.throttle	= perf_event__repipe,
2149 			.unthrottle	= perf_event__repipe,
2150 			.ksymbol	= perf_event__repipe,
2151 			.bpf		= perf_event__repipe,
2152 			.text_poke	= perf_event__repipe,
2153 			.attr		= perf_event__repipe_attr,
2154 			.event_update	= perf_event__repipe_event_update,
2155 			.tracing_data	= perf_event__repipe_op2_synth,
2156 			.finished_round	= perf_event__repipe_oe_synth,
2157 			.build_id	= perf_event__repipe_op2_synth,
2158 			.id_index	= perf_event__repipe_op2_synth,
2159 			.auxtrace_info	= perf_event__repipe_op2_synth,
2160 			.auxtrace_error	= perf_event__repipe_op2_synth,
2161 			.time_conv	= perf_event__repipe_op2_synth,
2162 			.thread_map	= perf_event__repipe_op2_synth,
2163 			.cpu_map	= perf_event__repipe_op2_synth,
2164 			.stat_config	= perf_event__repipe_op2_synth,
2165 			.stat		= perf_event__repipe_op2_synth,
2166 			.stat_round	= perf_event__repipe_op2_synth,
2167 			.feature	= perf_event__repipe_op2_synth,
2168 			.finished_init	= perf_event__repipe_op2_synth,
2169 			.compressed	= perf_event__repipe_op4_synth,
2170 			.auxtrace	= perf_event__repipe_auxtrace,
2171 		},
2172 		.input_name  = "-",
2173 		.samples = LIST_HEAD_INIT(inject.samples),
2174 		.output = {
2175 			.path = "-",
2176 			.mode = PERF_DATA_MODE_WRITE,
2177 			.use_stdio = true,
2178 		},
2179 	};
2180 	struct perf_data data = {
2181 		.mode = PERF_DATA_MODE_READ,
2182 		.use_stdio = true,
2183 	};
2184 	int ret;
2185 	bool repipe = true;
2186 	const char *known_build_ids = NULL;
2187 
2188 	struct option options[] = {
2189 		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
2190 			    "Inject build-ids into the output stream"),
2191 		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
2192 			    "Inject build-ids of all DSOs into the output stream"),
2193 		OPT_STRING(0, "known-build-ids", &known_build_ids,
2194 			   "buildid path [,buildid path...]",
2195 			   "build-ids to use for given paths"),
2196 		OPT_STRING('i', "input", &inject.input_name, "file",
2197 			   "input file name"),
2198 		OPT_STRING('o', "output", &inject.output.path, "file",
2199 			   "output file name"),
2200 		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
2201 			    "Merge sched-stat and sched-switch for getting events "
2202 			    "where and how long tasks slept"),
2203 #ifdef HAVE_JITDUMP
2204 		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
2205 #endif
2206 		OPT_INCR('v', "verbose", &verbose,
2207 			 "be more verbose (show build ids, etc)"),
2208 		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2209 			   "file", "vmlinux pathname"),
2210 		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
2211 			    "don't load vmlinux even if found"),
2212 		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
2213 			   "kallsyms pathname"),
2214 		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
2215 		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
2216 				    NULL, "opts", "Instruction Tracing options\n"
2217 				    ITRACE_HELP,
2218 				    itrace_parse_synth_opts),
2219 		OPT_BOOLEAN(0, "strip", &inject.strip,
2220 			    "strip non-synthesized events (use with --itrace)"),
2221 		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
2222 				    "correlate time between VM guests and the host",
2223 				    parse_vm_time_correlation),
2224 		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
2225 				    "inject events from a guest perf.data file",
2226 				    parse_guest_data),
2227 		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
2228 			   "guest mount directory under which every guest os"
2229 			   " instance has a subdir"),
2230 		OPT_END()
2231 	};
2232 	const char * const inject_usage[] = {
2233 		"perf inject [<options>]",
2234 		NULL
2235 	};
2236 #ifndef HAVE_JITDUMP
2237 	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
2238 #endif
2239 	argc = parse_options(argc, argv, options, inject_usage, 0);
2240 
2241 	/*
2242 	 * Any (unrecognized) arguments left?
2243 	 */
2244 	if (argc)
2245 		usage_with_options(inject_usage, options);
2246 
2247 	if (inject.strip && !inject.itrace_synth_opts.set) {
2248 		pr_err("--strip option requires --itrace option\n");
2249 		return -1;
2250 	}
2251 
2252 	if (symbol__validate_sym_arguments())
2253 		return -1;
2254 
2255 	if (inject.in_place_update) {
2256 		if (!strcmp(inject.input_name, "-")) {
2257 			pr_err("Input file name required for in-place updating\n");
2258 			return -1;
2259 		}
2260 		if (strcmp(inject.output.path, "-")) {
2261 			pr_err("Output file name must not be specified for in-place updating\n");
2262 			return -1;
2263 		}
2264 		if (!data.force && !inject.in_place_update_dry_run) {
2265 			pr_err("The input file would be updated in place, "
2266 				"so the --force option is required.\n");
2267 			return -1;
2268 		}
2269 		if (!inject.in_place_update_dry_run)
2270 			data.in_place_update = true;
2271 	} else {
2272 		if (strcmp(inject.output.path, "-") && !inject.strip &&
2273 		    has_kcore_dir(inject.input_name)) {
2274 			inject.output.is_dir = true;
2275 			inject.copy_kcore_dir = true;
2276 		}
2277 		if (perf_data__open(&inject.output)) {
2278 			perror("failed to create output file");
2279 			return -1;
2280 		}
2281 	}
2282 
2283 	data.path = inject.input_name;
2284 	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
2285 		inject.is_pipe = true;
2286 		/*
2287 		 * Do not repipe the header when the input is a regular
2288 		 * file, since we can either rewrite the header at the
2289 		 * end or write a new pipe header.
2290 		 */
2291 		if (strcmp(inject.input_name, "-"))
2292 			repipe = false;
2293 	}
2294 
2295 	inject.session = __perf_session__new(&data, repipe,
2296 					     output_fd(&inject),
2297 					     &inject.tool);
2298 	if (IS_ERR(inject.session)) {
2299 		ret = PTR_ERR(inject.session);
2300 		goto out_close_output;
2301 	}
2302 
2303 	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
2304 		pr_warning("Decompression initialization failed.\n");
2305 
2306 	/* Save original section info before feature bits change */
2307 	ret = save_section_info(&inject);
2308 	if (ret)
2309 		goto out_delete;
2310 
2311 	if (!data.is_pipe && inject.output.is_pipe) {
2312 		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
2313 		if (ret < 0) {
2314 			pr_err("Couldn't write a new pipe header.\n");
2315 			goto out_delete;
2316 		}
2317 
2318 		ret = perf_event__synthesize_for_pipe(&inject.tool,
2319 						      inject.session,
2320 						      &inject.output,
2321 						      perf_event__repipe);
2322 		if (ret < 0)
2323 			goto out_delete;
2324 	}
2325 
2326 	if (inject.build_ids && !inject.build_id_all) {
2327 		/*
2328 		 * Make sure the mmap records are ordered correctly, which
2329 		 * matters especially for jitted code mmaps. We cannot
2330 		 * generate the buildid hit list and inject the jit mmaps
2331 		 * at the same time for now.
2332 		 */
2333 		inject.tool.ordered_events = true;
2334 		inject.tool.ordering_requires_timestamps = true;
2335 		if (known_build_ids != NULL) {
2336 			inject.known_build_ids =
2337 				perf_inject__parse_known_build_ids(known_build_ids);
2338 
2339 			if (inject.known_build_ids == NULL) {
2340 				pr_err("Couldn't parse known build ids.\n");
2341 				goto out_delete;
2342 			}
2343 		}
2344 	}
2345 
2346 	if (inject.sched_stat) {
2347 		inject.tool.ordered_events = true;
2348 	}
2349 
2350 #ifdef HAVE_JITDUMP
2351 	if (inject.jit_mode) {
2352 		inject.tool.mmap2	   = perf_event__jit_repipe_mmap2;
2353 		inject.tool.mmap	   = perf_event__jit_repipe_mmap;
2354 		inject.tool.ordered_events = true;
2355 		inject.tool.ordering_requires_timestamps = true;
2356 		/*
2357 		 * JIT MMAP injection injects all MMAP events in one go, so it
2358 		 * does not obey finished_round semantics.
2359 		 */
2360 		inject.tool.finished_round = perf_event__drop_oe;
2361 	}
2362 #endif
2363 	ret = symbol__init(&inject.session->header.env);
2364 	if (ret < 0)
2365 		goto out_delete;
2366 
2367 	ret = __cmd_inject(&inject);
2368 
2369 	guest_session__exit(&inject.guest_session);
2370 
2371 out_delete:
2372 	strlist__delete(inject.known_build_ids);
2373 	zstd_fini(&(inject.session->zstd_data));
2374 	perf_session__delete(inject.session);
2375 out_close_output:
2376 	if (!inject.in_place_update)
2377 		perf_data__close(&inject.output);
2378 	free(inject.itrace_synth_opts.vm_tm_corr_args);
2379 	return ret;
2380 }
2381