/* xref: /openbmc/linux/tools/perf/util/session.c (revision 57fc032a) */
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

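/*
 * Note on compressed records: each PERF_RECORD_COMPRESSED payload is
 * decompressed into its own anonymous mmap()ed buffer, and the buffers
 * are chained on session->decomp .. session->decomp_last. Any
 * undelivered tail of the previous buffer is copied to the front of the
 * new one, so events that straddle two compressed records can be
 * reassembled before delivery.
 */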
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

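/*
 * Allocate and set up a session: init the machines and the ordered
 * events queue, open the perf.data file when one is given (reading the
 * header in read mode), and create kernel maps up front in write mode.
 * If the timestamps needed for ordering are unavailable (no
 * sample_id_all), fall back to unordered processing.
 */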
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

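/*
 * Pipe input cannot be seeked past, so when an AUX area payload is left
 * unhandled its bytes still have to be consumed from the stream.
 * skipn() reads and discards n bytes in buffer-sized chunks.
 */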
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

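/*
 * Fill every callback the tool did not set with a default, so the
 * dispatch paths below can call tool->xxx() unconditionally instead of
 * testing each pointer for NULL.
 */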
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid		= bswap_32(event->namespaces.pid);
	event->namespaces.tid		= bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

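/*
 * Reverse the bits of one byte in three swaps: nibbles, then bit pairs,
 * then adjacent bits. For example, 0xb1 (10110001) becomes
 * 0x1b -> 0x4e -> 0x8d (10001101), i.e. the mirrored bit pattern.
 */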
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

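/*
 * Note: bswap_safe() checks attr->size before touching a field, so
 * attributes written by an older perf that predates a given field are
 * left alone rather than swapped past the end of the recorded struct.
 */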
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id     = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu    = bswap_32(event->stat.cpu);
	event->stat.val    = bswap_64(event->stat.val);
	event->stat.ena    = bswap_64(event->stat.ena);
	event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

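/*
 * Dispatch table indexed by event type: when a perf.data file was
 * recorded on a machine of the opposite endianness, the matching
 * handler byte-swaps the event in place before it is processed.
 */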
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get user call chain,
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR registers come in pairs. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record it like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			(unsigned short)e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

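/*
 * With PERF_SAMPLE_READ the event carries raw counter values, while the
 * tool callbacks expect a period. The delta against the last value seen
 * for the same sample id (cached in sid->period) converts one into the
 * other; zero-period samples are dropped since they carry no new counts.
 */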
static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample
	 * with a zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union  perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

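/*
 * Central dispatch for kernel-generated events: after resolving the
 * evsel (by sample id) and the machine (host vs. guest), each record
 * type is routed to the matching tool callback, with per-event
 * bookkeeping (lost counts, AUX gaps, unknown events) done on the way.
 */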
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

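/*
 * Synthetic/user-space records (attrs, build ids, auxtrace metadata,
 * stat data, ...) are handled here immediately instead of going through
 * the ordered-events queue, since later processing may depend on them.
 */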
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

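/*
 * Read a single event at an arbitrary file offset without disturbing
 * the normal processing state. In one-mmap mode the event can be
 * returned straight out of the mapping; otherwise it is read (and, if
 * needed, byte-swapped) into the caller-supplied buffer.
 */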
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the event body after the header already placed in buf. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

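/*
 * Main per-event entry point for file processing: byte-swap if the
 * recording endianness differs, account the event type, hand user
 * events off for immediate processing, and otherwise either queue the
 * event by timestamp or deliver it directly when ordering is off.
 */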
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

1736 	if (session->tool->lost_samples == perf_event__process_lost_samples) {
1737 		double drop_rate;
1738 
1739 		drop_rate = (double)stats->total_lost_samples /
1740 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
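		/*
		 * e.g. 100 lost out of 900 processed + 100 lost samples is a
		 * 10% drop rate, which exceeds the 5% threshold below.
		 */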
1741 		if (drop_rate > 0.05) {
1742 			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1743 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1744 				    drop_rate * 100.0);
1745 		}
1746 	}
1747 
1748 	if (session->tool->aux == perf_event__process_aux &&
1749 	    stats->total_aux_lost != 0) {
1750 		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1751 			    stats->total_aux_lost,
1752 			    stats->nr_events[PERF_RECORD_AUX]);
1753 	}
1754 
1755 	if (session->tool->aux == perf_event__process_aux &&
1756 	    stats->total_aux_partial != 0) {
1757 		bool vmm_exclusive = false;
1758 
1759 		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1760 		                       &vmm_exclusive);
1761 
1762 		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1763 		            "Are you running a KVM guest in the background?%s\n\n",
1764 			    stats->total_aux_partial,
1765 			    stats->nr_events[PERF_RECORD_AUX],
1766 			    vmm_exclusive ?
1767 			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
1768 			    "will reduce the gaps to only guest's timeslices." :
1769 			    "");
1770 	}
1771 
1772 	if (stats->nr_unknown_events != 0) {
1773 		ui__warning("Found %u unknown events!\n\n"
1774 			    "Is this an older tool processing a perf.data "
1775 			    "file generated by a more recent tool?\n\n"
1776 			    "If that is not the case, consider "
1777 			    "reporting to linux-kernel@vger.kernel.org.\n\n",
1778 			    stats->nr_unknown_events);
1779 	}
1780 
1781 	if (stats->nr_unknown_id != 0) {
1782 		ui__warning("%u samples with id not present in the header\n",
1783 			    stats->nr_unknown_id);
1784 	}
1785 
1786 	if (stats->nr_invalid_chains != 0) {
1787 		ui__warning("Found invalid callchains!\n\n"
1788 			    "%u out of %u events were discarded for this reason.\n\n"
1789 			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1790 			    stats->nr_invalid_chains,
1791 			    stats->nr_events[PERF_RECORD_SAMPLE]);
1792 	}
1793 
1794 	if (stats->nr_unprocessable_samples != 0) {
1795 		ui__warning("%u unprocessable samples recorded.\n"
1796 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
1797 			    stats->nr_unprocessable_samples);
1798 	}
1799 
1800 	perf_session__warn_order(session);
1801 
1802 	events_stats__auxtrace_error_warn(stats);
1803 
1804 	if (stats->nr_proc_map_timeout != 0) {
1805 		ui__warning("%d map information files for pre-existing threads were\n"
1806 			    "not processed; if there are samples for their addresses\n"
1807 			    "they will not be resolved. You can find out which threads\n"
1808 			    "these are by running with -v and redirecting the output\n"
1809 			    "to a file.\n"
1810 			    "Was the time limit to process the proc map too short?\n"
1811 			    "Increase it with --proc-map-timeout.\n",
1812 			    stats->nr_proc_map_timeout);
1813 	}
1814 }
1815 
1816 static int perf_session__flush_thread_stack(struct thread *thread,
1817 					    void *p __maybe_unused)
1818 {
1819 	return thread_stack__flush(thread);
1820 }
1821 
1822 static int perf_session__flush_thread_stacks(struct perf_session *session)
1823 {
1824 	return machines__for_each_thread(&session->machines,
1825 					 perf_session__flush_thread_stack,
1826 					 NULL);
1827 }
1828 
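/*
 * Set to 1 from signal handlers (e.g. on SIGINT in builtin-report.c) so the
 * event processing loops below can bail out between events.
 */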
1829 volatile int session_done;
1830 
1831 static int __perf_session__process_decomp_events(struct perf_session *session);
1832 
1833 static int __perf_session__process_pipe_events(struct perf_session *session)
1834 {
1835 	struct ordered_events *oe = &session->ordered_events;
1836 	struct perf_tool *tool = session->tool;
1837 	int fd = perf_data__fd(session->data);
1838 	union perf_event *event;
1839 	uint32_t size, cur_size = 0;
1840 	void *buf = NULL;
1841 	s64 skip = 0;
1842 	u64 head;
1843 	ssize_t err;
1844 	void *p;
1845 
1846 	perf_tool__fill_defaults(tool);
1847 
1848 	head = 0;
1849 	cur_size = sizeof(union perf_event);
1850 
1851 	buf = malloc(cur_size);
1852 	if (!buf)
1853 		return -errno;
1854 	ordered_events__set_copy_on_queue(oe, true);
1855 more:
1856 	event = buf;
1857 	err = readn(fd, event, sizeof(struct perf_event_header));
1858 	if (err <= 0) {
1859 		if (err == 0)
1860 			goto done;
1861 
1862 		pr_err("failed to read event header\n");
1863 		goto out_err;
1864 	}
1865 
1866 	if (session->header.needs_swap)
1867 		perf_event_header__bswap(&event->header);
1868 
1869 	size = event->header.size;
1870 	if (size < sizeof(struct perf_event_header)) {
1871 		pr_err("bad event header size\n");
1872 		goto out_err;
1873 	}
1874 
1875 	if (size > cur_size) {
1876 		void *new = realloc(buf, size);
1877 		if (!new) {
1878 			pr_err("failed to allocate memory to read event\n");
1879 			goto out_err;
1880 		}
1881 		buf = new;
1882 		cur_size = size;
1883 		event = buf;
1884 	}
1885 	p = event;
1886 	p += sizeof(struct perf_event_header);
1887 
1888 	if (size - sizeof(struct perf_event_header)) {
1889 		err = readn(fd, p, size - sizeof(struct perf_event_header));
1890 		if (err <= 0) {
1891 			if (err == 0) {
1892 				pr_err("unexpected end of event stream\n");
1893 				goto done;
1894 			}
1895 
1896 			pr_err("failed to read event data\n");
1897 			goto out_err;
1898 		}
1899 	}
1900 
1901 	if ((skip = perf_session__process_event(session, event, head)) < 0) {
1902 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1903 		       head, event->header.size, event->header.type);
1904 		err = -EINVAL;
1905 		goto out_err;
1906 	}
1907 
1908 	head += size;
1909 
1910 	if (skip > 0)
1911 		head += skip;
1912 
1913 	err = __perf_session__process_decomp_events(session);
1914 	if (err)
1915 		goto out_err;
1916 
1917 	if (!session_done())
1918 		goto more;
1919 done:
1920 	/* do the final flush for ordered samples */
1921 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1922 	if (err)
1923 		goto out_err;
1924 	err = auxtrace__flush_events(session, tool);
1925 	if (err)
1926 		goto out_err;
1927 	err = perf_session__flush_thread_stacks(session);
1928 out_err:
1929 	free(buf);
1930 	if (!tool->no_warn)
1931 		perf_session__warn_about_errors(session);
1932 	ordered_events__free(&session->ordered_events);
1933 	auxtrace__free_events(session);
1934 	return err;
1935 }
1936 
1937 static union perf_event *
1938 fetch_mmaped_event(struct perf_session *session,
1939 		   u64 head, size_t mmap_size, char *buf)
1940 {
1941 	union perf_event *event;
1942 
1943 	/*
1944 	 * Ensure we have enough space remaining to read
1945 	 * the size of the event in the headers.
1946 	 */
1947 	if (head + sizeof(event->header) > mmap_size)
1948 		return NULL;
1949 
1950 	event = (union perf_event *)(buf + head);
1951 
1952 	if (session->header.needs_swap)
1953 		perf_event_header__bswap(&event->header);
1954 
1955 	if (head + event->header.size > mmap_size) {
1956 		/* We're not fetching the event so swap back again */
1957 		if (session->header.needs_swap)
1958 			perf_event_header__bswap(&event->header);
1959 		pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
1960 			 __func__, head, event->header.size, mmap_size);
1961 		return ERR_PTR(-EINVAL);
1962 	}
1963 
1964 	return event;
1965 }
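
/*
 * fetch_mmaped_event() returns NULL when there is not enough of the event
 * left in the current window (reader__process_events() remaps further into
 * the file; the decompression loop stops), and ERR_PTR(-EINVAL) when the
 * header itself cannot be valid, i.e. a corrupt or fuzzed perf.data file.
 */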
1966 
1967 static int __perf_session__process_decomp_events(struct perf_session *session)
1968 {
1969 	s64 skip;
1970 	u64 size, file_pos = 0;
1971 	struct decomp *decomp = session->decomp_last;
1972 
1973 	if (!decomp)
1974 		return 0;
1975 
1976 	while (decomp->head < decomp->size && !session_done()) {
1977 		union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1978 
1979 		if (IS_ERR(event))
1980 			return PTR_ERR(event);
1981 
1982 		if (!event)
1983 			break;
1984 
1985 		size = event->header.size;
1986 
1987 		if (size < sizeof(struct perf_event_header) ||
1988 		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1989 			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1990 				decomp->file_pos + decomp->head, event->header.size, event->header.type);
1991 			return -EINVAL;
1992 		}
1993 
1994 		if (skip)
1995 			size += skip;
1996 
1997 		decomp->head += size;
1998 	}
1999 
2000 	return 0;
2001 }
2002 
2003 /*
2004  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2005  * slices. On 32bit we use 32MB.
2006  */
2007 #if BITS_PER_LONG == 64
2008 #define MMAP_SIZE ULLONG_MAX
2009 #define NUM_MMAPS 1
2010 #else
2011 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2012 #define NUM_MMAPS 128
2013 #endif
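
/*
 * reader__process_events() below cycles through mmaps[] with
 * map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1), which only wraps
 * correctly because NUM_MMAPS (1 or 128) is a power of two. A guard one
 * could add (a sketch, not in the original source):
 *
 *	_Static_assert((NUM_MMAPS & (NUM_MMAPS - 1)) == 0,
 *		       "NUM_MMAPS must be a power of two");
 */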
2014 
2015 struct reader;
2016 
2017 typedef s64 (*reader_cb_t)(struct perf_session *session,
2018 			   union perf_event *event,
2019 			   u64 file_offset);
2020 
2021 struct reader {
2022 	int		 fd;
2023 	u64		 data_size;
2024 	u64		 data_offset;
2025 	reader_cb_t	 process;
2026 };
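
/*
 * The process callback returns < 0 on error, 0 on success, or a positive
 * number of extra bytes it consumed beyond header.size (user events such as
 * PERF_RECORD_HEADER_TRACING_DATA can do this); reader__process_events()
 * adds that amount to the size it skips.
 */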
2027 
2028 static int
2029 reader__process_events(struct reader *rd, struct perf_session *session,
2030 		       struct ui_progress *prog)
2031 {
2032 	u64 data_size = rd->data_size;
2033 	u64 head, page_offset, file_offset, file_pos, size;
2034 	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2035 	size_t	mmap_size;
2036 	char *buf, *mmaps[NUM_MMAPS];
2037 	union perf_event *event;
2038 	s64 skip;
2039 
2040 	page_offset = page_size * (rd->data_offset / page_size);
2041 	file_offset = page_offset;
2042 	head = rd->data_offset - page_offset;
2043 
2044 	ui_progress__init_size(prog, data_size, "Processing events...");
2045 
2046 	data_size += rd->data_offset;
2047 
2048 	mmap_size = MMAP_SIZE;
2049 	if (mmap_size > data_size) {
2050 		mmap_size = data_size;
2051 		session->one_mmap = true;
2052 	}
2053 
2054 	memset(mmaps, 0, sizeof(mmaps));
2055 
2056 	mmap_prot  = PROT_READ;
2057 	mmap_flags = MAP_SHARED;
2058 
2059 	if (session->header.needs_swap) {
2060 		mmap_prot  |= PROT_WRITE;
2061 		mmap_flags = MAP_PRIVATE;
2062 	}
2063 remap:
2064 	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2065 		   file_offset);
2066 	if (buf == MAP_FAILED) {
2067 		pr_err("failed to mmap file\n");
2068 		err = -errno;
2069 		goto out;
2070 	}
2071 	mmaps[map_idx] = buf;
2072 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2073 	file_pos = file_offset + head;
2074 	if (session->one_mmap) {
2075 		session->one_mmap_addr = buf;
2076 		session->one_mmap_offset = file_offset;
2077 	}
2078 
2079 more:
2080 	event = fetch_mmaped_event(session, head, mmap_size, buf);
2081 	if (IS_ERR(event))
2082 		return PTR_ERR(event);
2083 
2084 	if (!event) {
2085 		if (mmaps[map_idx]) {
2086 			munmap(mmaps[map_idx], mmap_size);
2087 			mmaps[map_idx] = NULL;
2088 		}
2089 
2090 		page_offset = page_size * (head / page_size);
2091 		file_offset += page_offset;
2092 		head -= page_offset;
2093 		goto remap;
2094 	}
2095 
2096 	size = event->header.size;
2097 
2098 	skip = -EINVAL;
2099 
2100 	if (size < sizeof(struct perf_event_header) ||
2101 	    (skip = rd->process(session, event, file_pos)) < 0) {
2102 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2103 		       file_offset + head, event->header.size,
2104 		       event->header.type, strerror(-skip));
2105 		err = skip;
2106 		goto out;
2107 	}
2108 
2109 	if (skip)
2110 		size += skip;
2111 
2112 	head += size;
2113 	file_pos += size;
2114 
2115 	err = __perf_session__process_decomp_events(session);
2116 	if (err)
2117 		goto out;
2118 
2119 	ui_progress__update(prog, size);
2120 
2121 	if (session_done())
2122 		goto out;
2123 
2124 	if (file_pos < data_size)
2125 		goto more;
2126 
2127 out:
2128 	return err;
2129 }
2130 
2131 static s64 process_simple(struct perf_session *session,
2132 			  union perf_event *event,
2133 			  u64 file_offset)
2134 {
2135 	return perf_session__process_event(session, event, file_offset);
2136 }
2137 
2138 static int __perf_session__process_events(struct perf_session *session)
2139 {
2140 	struct reader rd = {
2141 		.fd		= perf_data__fd(session->data),
2142 		.data_size	= session->header.data_size,
2143 		.data_offset	= session->header.data_offset,
2144 		.process	= process_simple,
2145 	};
2146 	struct ordered_events *oe = &session->ordered_events;
2147 	struct perf_tool *tool = session->tool;
2148 	struct ui_progress prog;
2149 	int err;
2150 
2151 	perf_tool__fill_defaults(tool);
2152 
2153 	if (rd.data_size == 0)
2154 		return -1;
2155 
2156 	ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2157 
2158 	err = reader__process_events(&rd, session, &prog);
2159 	if (err)
2160 		goto out_err;
2161 	/* do the final flush for ordered samples */
2162 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2163 	if (err)
2164 		goto out_err;
2165 	err = auxtrace__flush_events(session, tool);
2166 	if (err)
2167 		goto out_err;
2168 	err = perf_session__flush_thread_stacks(session);
2169 out_err:
2170 	ui_progress__finish();
2171 	if (!tool->no_warn)
2172 		perf_session__warn_about_errors(session);
2173 	/*
2174 	 * We may be switching the perf.data output; make ordered_events
2175 	 * reusable.
2176 	 */
2177 	ordered_events__reinit(&session->ordered_events);
2178 	auxtrace__free_events(session);
2179 	session->one_mmap = false;
2180 	return err;
2181 }
2182 
2183 int perf_session__process_events(struct perf_session *session)
2184 {
2185 	if (perf_session__register_idle_thread(session) < 0)
2186 		return -ENOMEM;
2187 
2188 	if (perf_data__is_pipe(session->data))
2189 		return __perf_session__process_pipe_events(session);
2190 
2191 	return __perf_session__process_events(session);
2192 }
2193 
2194 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2195 {
2196 	struct evsel *evsel;
2197 
2198 	evlist__for_each_entry(session->evlist, evsel) {
2199 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2200 			return true;
2201 	}
2202 
2203 	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2204 	return false;
2205 }
2206 
2207 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2208 {
2209 	char *bracket;
2210 	struct ref_reloc_sym *ref;
2211 	struct kmap *kmap;
2212 
2213 	ref = zalloc(sizeof(struct ref_reloc_sym));
2214 	if (ref == NULL)
2215 		return -ENOMEM;
2216 
2217 	ref->name = strdup(symbol_name);
2218 	if (ref->name == NULL) {
2219 		free(ref);
2220 		return -ENOMEM;
2221 	}
2222 
2223 	bracket = strchr(ref->name, ']');
2224 	if (bracket)
2225 		*bracket = '\0';
2226 
2227 	ref->addr = addr;
2228 
2229 	kmap = map__kmap(map);
2230 	if (kmap)
2231 		kmap->ref_reloc_sym = ref;
2232 
2233 	return 0;
2234 }
2235 
2236 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2237 {
2238 	return machines__fprintf_dsos(&session->machines, fp);
2239 }
2240 
2241 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2242 					  bool (skip)(struct dso *dso, int parm), int parm)
2243 {
2244 	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2245 }
2246 
2247 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2248 {
2249 	size_t ret;
2250 	const char *msg = "";
2251 
2252 	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2253 		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2254 
2255 	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2256 
2257 	ret += events_stats__fprintf(&session->evlist->stats, fp);
2258 	return ret;
2259 }
2260 
2261 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2262 {
2263 	/*
2264 	 * FIXME: Here we have to actually print all the machines in this
2265 	 * session, not just the host...
2266 	 */
2267 	return machine__fprintf(&session->machines.host, fp);
2268 }
2269 
2270 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2271 					      unsigned int type)
2272 {
2273 	struct evsel *pos;
2274 
2275 	evlist__for_each_entry(session->evlist, pos) {
2276 		if (pos->core.attr.type == type)
2277 			return pos;
2278 	}
2279 	return NULL;
2280 }
2281 
2282 int perf_session__cpu_bitmap(struct perf_session *session,
2283 			     const char *cpu_list, unsigned long *cpu_bitmap)
2284 {
2285 	int i, err = -1;
2286 	struct perf_cpu_map *map;
2287 
2288 	for (i = 0; i < PERF_TYPE_MAX; ++i) {
2289 		struct evsel *evsel;
2290 
2291 		evsel = perf_session__find_first_evtype(session, i);
2292 		if (!evsel)
2293 			continue;
2294 
2295 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2296 			pr_err("File does not contain CPU events. "
2297 			       "Remove -C option to proceed.\n");
2298 			return -1;
2299 		}
2300 	}
2301 
2302 	map = perf_cpu_map__new(cpu_list);
2303 	if (map == NULL) {
2304 		pr_err("Invalid cpu_list\n");
2305 		return -1;
2306 	}
2307 
2308 	for (i = 0; i < map->nr; i++) {
2309 		int cpu = map->map[i];
2310 
2311 		if (cpu >= MAX_NR_CPUS) {
2312 			pr_err("Requested CPU %d too large. "
2313 			       "Consider raising MAX_NR_CPUS\n", cpu);
2314 			goto out_delete_map;
2315 		}
2316 
2317 		set_bit(cpu, cpu_bitmap);
2318 	}
2319 
2320 	err = 0;
2321 
2322 out_delete_map:
2323 	perf_cpu_map__put(map);
2324 	return err;
2325 }
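
/*
 * Example (a sketch modelled on callers like builtin-report.c): restricting
 * processing to the CPUs given with -C.
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 *
 *	// samples can then be filtered with test_bit(sample->cpu, cpu_bitmap)
 */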
2326 
2327 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2328 				bool full)
2329 {
2330 	if (session == NULL || fp == NULL)
2331 		return;
2332 
2333 	fprintf(fp, "# ========\n");
2334 	perf_header__fprintf_info(session, fp, full);
2335 	fprintf(fp, "# ========\n#\n");
2336 }
2337 
2338 
2339 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2340 					     const struct evsel_str_handler *assocs,
2341 					     size_t nr_assocs)
2342 {
2343 	struct evsel *evsel;
2344 	size_t i;
2345 	int err;
2346 
2347 	for (i = 0; i < nr_assocs; i++) {
2348 		/*
2349 		 * If asked to add a handler for an event that is not in the
2350 		 * session, just ignore it.
2351 		 */
2352 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2353 		if (evsel == NULL)
2354 			continue;
2355 
2356 		err = -EEXIST;
2357 		if (evsel->handler != NULL)
2358 			goto out;
2359 		evsel->handler = assocs[i].handler;
2360 	}
2361 
2362 	err = 0;
2363 out:
2364 	return err;
2365 }
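
/*
 * Example (a sketch modelled on callers such as builtin-sched.c, via the
 * perf_session__set_tracepoints_handlers() wrapper in session.h):
 *
 *	static const struct evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch_event },
 *		{ "sched:sched_wakeup", process_sched_wakeup_event },
 *	};
 *
 *	if (perf_session__set_tracepoints_handlers(session, handlers))
 *		return -1;
 */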
2366 
2367 int perf_event__process_id_index(struct perf_session *session,
2368 				 union perf_event *event)
2369 {
2370 	struct evlist *evlist = session->evlist;
2371 	struct id_index_event *ie = &event->id_index;
2372 	size_t i, nr, max_nr;
2373 
2374 	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2375 		 sizeof(struct id_index_entry);
2376 	nr = ie->nr;
2377 	if (nr > max_nr)
2378 		return -EINVAL;
2379 
2380 	if (dump_trace)
2381 		fprintf(stdout, " nr: %zu\n", nr);
2382 
2383 	for (i = 0; i < nr; i++) {
2384 		struct id_index_entry *e = &ie->entries[i];
2385 		struct perf_sample_id *sid;
2386 
2387 		if (dump_trace) {
2388 			fprintf(stdout,	" ... id: %"PRIu64, e->id);
2389 			fprintf(stdout,	"  idx: %"PRIu64, e->idx);
2390 			fprintf(stdout,	"  cpu: %"PRId64, e->cpu);
2391 			fprintf(stdout,	"  tid: %"PRId64"\n", e->tid);
2392 		}
2393 
2394 		sid = perf_evlist__id2sid(evlist, e->id);
2395 		if (!sid)
2396 			return -ENOENT;
2397 		sid->idx = e->idx;
2398 		sid->cpu = e->cpu;
2399 		sid->tid = e->tid;
2400 	}
2401 	return 0;
2402 }
2403 
2404 int perf_event__synthesize_id_index(struct perf_tool *tool,
2405 				    perf_event__handler_t process,
2406 				    struct evlist *evlist,
2407 				    struct machine *machine)
2408 {
2409 	union perf_event *ev;
2410 	struct evsel *evsel;
2411 	size_t nr = 0, i = 0, sz, max_nr, n;
2412 	int err;
2413 
2414 	pr_debug2("Synthesizing id index\n");
2415 
2416 	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2417 		 sizeof(struct id_index_entry);
2418 
2419 	evlist__for_each_entry(evlist, evsel)
2420 		nr += evsel->ids;
2421 
2422 	n = nr > max_nr ? max_nr : nr;
2423 	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2424 	ev = zalloc(sz);
2425 	if (!ev)
2426 		return -ENOMEM;
2427 
2428 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2429 	ev->id_index.header.size = sz;
2430 	ev->id_index.nr = n;
2431 
2432 	evlist__for_each_entry(evlist, evsel) {
2433 		u32 j;
2434 
2435 		for (j = 0; j < evsel->ids; j++) {
2436 			struct id_index_entry *e;
2437 			struct perf_sample_id *sid;
2438 
2439 			if (i >= n) {
2440 				err = process(tool, ev, NULL, machine);
2441 				if (err)
2442 					goto out_err;
2443 				nr -= n;
2444 				i = 0;
2445 			}
2446 
2447 			e = &ev->id_index.entries[i++];
2448 
2449 			e->id = evsel->id[j];
2450 
2451 			sid = perf_evlist__id2sid(evlist, e->id);
2452 			if (!sid) {
2453 				free(ev);
2454 				return -ENOENT;
2455 			}
2456 
2457 			e->idx = sid->idx;
2458 			e->cpu = sid->cpu;
2459 			e->tid = sid->tid;
2460 		}
2461 	}
2462 
2463 	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2464 	ev->id_index.header.size = sz;
2465 	ev->id_index.nr = nr;
2466 
2467 	err = process(tool, ev, NULL, machine);
2468 out_err:
2469 	free(ev);
2470 
2471 	return err;
2472 }
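
/*
 * With 32-byte id_index entries and a 16-byte id_index header, one 64KB
 * record holds at most (65535 - 16) / 32 = 2047 entries, so e.g. 5000 ids
 * are emitted as two full records from inside the loop plus a final record
 * resized to the remaining 906 entries before the last process() call.
 */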
2473