// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "util.h"
#include "../perf.h"
#include "arch/common.h"

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
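
/*
 * Worked example of the buffer chaining above (illustrative, not part of
 * the original file): if the previous decomp node holds 100 bytes with
 * head at 40, decomp_last_rem is 60; those 60 trailing bytes are copied
 * to the front of the new node and the freshly decompressed stream is
 * appended after them, so an event that straddled the old node's end can
 * be parsed whole from the new one.
 */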
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}
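
/*
 * Minimal usage sketch (illustrative only; the field names assume the
 * current struct perf_data layout):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (session == NULL)
 *		return -1;
 *	// ... process events ...
 *	perf_session__delete(session);
 */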

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}
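
/*
 * Usage sketch (illustrative): a tool fills in only the callbacks it
 * cares about and lets perf_tool__fill_defaults() stub out the rest:
 *
 *	struct perf_tool tool = {
 *		.sample		= my_process_sample,	// hypothetical handler
 *		.ordered_events	= true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 *
 * After this, tool.mmap, tool.comm, etc. point at the stubs above, and
 * tool.finished_round flushes ordered events because ordered_events is set.
 */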

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
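
/*
 * Worked example of the PERF_ALIGN() step above (illustrative): for a
 * comm string "perf", strlen() + 1 is 5, which is rounded up to the next
 * u64 boundary, 8. The sample_id_all trailer therefore starts 8 bytes
 * after the string, and swap_sample_id_all() byte-swaps from there to
 * the end of the event. The mmap/mmap2 swaps below use the same trick
 * for the filename.
 */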

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid		= bswap_32(event->namespaces.pid);
	event->namespaces.tid		= bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
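
/*
 * Worked example (illustrative): revbyte(0xb1) proceeds as
 * 10110001 -> nibble swap -> 00011011 (0x1b)
 *          -> 2-bit swap  -> 01001110 (0x4e)
 *          -> 1-bit swap  -> 10001101 (0x8d),
 * i.e. the full bit reversal of the input byte. Applied to each byte of
 * the flags word, this moves bitfields allocated from the opposite end
 * of the byte back to the positions this build expects.
 */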

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id     = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu    = bswap_32(event->stat.cpu);
	event->stat.val    = bswap_64(event->stat.val);
	event->stat.ena    = bswap_64(event->stat.ena);
	event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
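
/*
 * Flow sketch (illustrative): with an ordered-events tool,
 * perf_session__process_event() parses each event's timestamp and hands
 * it to perf_session__queue_event() above; delivery is deferred until a
 * PERF_RECORD_FINISHED_ROUND arrives and process_finished_round() calls
 * ordered_events__flush(), which replays the queued events in timestamp
 * order through ordered_events__deliver_event().
 */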

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain:
		 * i is the number of kernel call chain entries,
		 * 1 is the PERF_CONTEXT_USER marker.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR registers come in pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to reconstruct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
		!callstack ? "... branch stack" : "... branch callstack",
		sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
				i, e->from, e->to,
				(unsigned short)e->flags.cycles,
				e->flags.mispred ? "M" : " ",
				e->flags.predicted ? "P" : " ",
				e->flags.abort ? "A" : " ",
				e->flags.in_tx ? "T" : " ",
				(unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
				i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}
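
/*
 * Worked example (illustrative): sampled registers are packed in
 * mask-bit order, so for mask 0x5 (bits 0 and 2 set) regs[0] holds the
 * value of register id 0 and regs[1] the value of register id 2; the
 * loop above walks the set bits and consumes regs[] in lockstep.
 */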

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
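
/*
 * Worked example (illustrative): PERF_SAMPLE_READ values are running
 * counts, so the per-sample period is the delta against the count cached
 * in the sample id. If sid->period was 900 and v->value is 1000, the
 * sample is delivered with period 100 and sid->period advances to 1000;
 * a zero delta is dropped by the check above.
 */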

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union  perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}
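
/*
 * Usage sketch (illustrative): a caller peeks at an event at a known
 * file offset without disturbing normal processing:
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *
 *	if (perf_session__peek_event(session, offset, buf, sizeof(buf),
 *				     &event, NULL))
 *		return -1;
 *	// event points either into the single mmap or at buf
 */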
1640 
1641 static s64 perf_session__process_event(struct perf_session *session,
1642 				       union perf_event *event, u64 file_offset)
1643 {
1644 	struct evlist *evlist = session->evlist;
1645 	struct perf_tool *tool = session->tool;
1646 	int ret;
1647 
1648 	if (session->header.needs_swap)
1649 		event_swap(event, perf_evlist__sample_id_all(evlist));
1650 
1651 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
1652 		return -EINVAL;
1653 
1654 	events_stats__inc(&evlist->stats, event->header.type);
1655 
1656 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1657 		return perf_session__process_user_event(session, event, file_offset);
1658 
1659 	if (tool->ordered_events) {
1660 		u64 timestamp = -1ULL;
1661 
1662 		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1663 		if (ret && ret != -1)
1664 			return ret;
1665 
1666 		ret = perf_session__queue_event(session, event, timestamp, file_offset);
1667 		if (ret != -ETIME)
1668 			return ret;
1669 	}
1670 
1671 	return perf_session__deliver_event(session, event, tool, file_offset);
1672 }
1673 
1674 void perf_event_header__bswap(struct perf_event_header *hdr)
1675 {
1676 	hdr->type = bswap_32(hdr->type);
1677 	hdr->misc = bswap_16(hdr->misc);
1678 	hdr->size = bswap_16(hdr->size);
1679 }
1680 
1681 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1682 {
1683 	return machine__findnew_thread(&session->machines.host, -1, pid);
1684 }
1685 
1686 /*
1687  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1688  * So here a single thread is created for that, but actually there is a separate
1689  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1690  * is only 1. That causes problems for some tools, requiring workarounds. For
1691  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1692  */
1693 int perf_session__register_idle_thread(struct perf_session *session)
1694 {
1695 	struct thread *thread;
1696 	int err = 0;
1697 
1698 	thread = machine__findnew_thread(&session->machines.host, 0, 0);
1699 	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1700 		pr_err("problem inserting idle task.\n");
1701 		err = -1;
1702 	}
1703 
1704 	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1705 		pr_err("problem inserting idle task.\n");
1706 		err = -1;
1707 	}
1708 
1709 	/* machine__findnew_thread() got the thread, so put it */
1710 	thread__put(thread);
1711 	return err;
1712 }
1713 
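/*
 * Out-of-order events are expected whenever an evsel wrote its ring buffer
 * backward, so only warn about them when no evsel did.
 */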
1714 static void
1715 perf_session__warn_order(const struct perf_session *session)
1716 {
1717 	const struct ordered_events *oe = &session->ordered_events;
1718 	struct evsel *evsel;
1719 	bool should_warn = true;
1720 
1721 	evlist__for_each_entry(session->evlist, evsel) {
1722 		if (evsel->core.attr.write_backward)
1723 			should_warn = false;
1724 	}
1725 
1726 	if (!should_warn)
1727 		return;
1728 	if (oe->nr_unordered_events != 0)
1729 		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1730 }
1731 
1732 static void perf_session__warn_about_errors(const struct perf_session *session)
1733 {
1734 	const struct events_stats *stats = &session->evlist->stats;
1735 
1736 	if (session->tool->lost == perf_event__process_lost &&
1737 	    stats->nr_events[PERF_RECORD_LOST] != 0) {
1738 		ui__warning("Processed %d events and lost %d chunks!\n\n"
1739 			    "Check IO/CPU overload!\n\n",
1740 			    stats->nr_events[0],
1741 			    stats->nr_events[PERF_RECORD_LOST]);
1742 	}
1743 
1744 	if (session->tool->lost_samples == perf_event__process_lost_samples) {
1745 		double drop_rate;
1746 
1747 		drop_rate = (double)stats->total_lost_samples /
1748 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
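		/* Warn only if more than 5% of the samples were lost */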
1749 		if (drop_rate > 0.05) {
1750 			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1751 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1752 				    drop_rate * 100.0);
1753 		}
1754 	}
1755 
1756 	if (session->tool->aux == perf_event__process_aux &&
1757 	    stats->total_aux_lost != 0) {
1758 		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1759 			    stats->total_aux_lost,
1760 			    stats->nr_events[PERF_RECORD_AUX]);
1761 	}
1762 
1763 	if (session->tool->aux == perf_event__process_aux &&
1764 	    stats->total_aux_partial != 0) {
1765 		bool vmm_exclusive = false;
1766 
1767 		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1768 		                       &vmm_exclusive);
1769 
1770 		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1771 		            "Are you running a KVM guest in the background?%s\n\n",
1772 			    stats->total_aux_partial,
1773 			    stats->nr_events[PERF_RECORD_AUX],
1774 			    vmm_exclusive ?
			    "\nReloading the kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only the guest's timeslices." :
1777 			    "");
1778 	}
1779 
1780 	if (stats->nr_unknown_events != 0) {
1781 		ui__warning("Found %u unknown events!\n\n"
1782 			    "Is this an older tool processing a perf.data "
1783 			    "file generated by a more recent tool?\n\n"
1784 			    "If that is not the case, consider "
1785 			    "reporting to linux-kernel@vger.kernel.org.\n\n",
1786 			    stats->nr_unknown_events);
1787 	}
1788 
1789 	if (stats->nr_unknown_id != 0) {
1790 		ui__warning("%u samples with id not present in the header\n",
1791 			    stats->nr_unknown_id);
1792 	}
1793 
1794 	if (stats->nr_invalid_chains != 0) {
1795 		ui__warning("Found invalid callchains!\n\n"
1796 			    "%u out of %u events were discarded for this reason.\n\n"
1797 			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1798 			    stats->nr_invalid_chains,
1799 			    stats->nr_events[PERF_RECORD_SAMPLE]);
1800 	}
1801 
1802 	if (stats->nr_unprocessable_samples != 0) {
1803 		ui__warning("%u unprocessable samples recorded.\n"
1804 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
1805 			    stats->nr_unprocessable_samples);
1806 	}
1807 
1808 	perf_session__warn_order(session);
1809 
1810 	events_stats__auxtrace_error_warn(stats);
1811 
1812 	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%u map information files for pre-existing threads were\n"
			    "not processed; if there are samples for those addresses,\n"
			    "they will not be resolved. You can find out which threads\n"
			    "these are by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "Is the time limit for processing proc maps too short?\n"
			    "Increase it with --proc-map-timeout.\n",
			    stats->nr_proc_map_timeout);
1821 	}
1822 }
1823 
1824 static int perf_session__flush_thread_stack(struct thread *thread,
1825 					    void *p __maybe_unused)
1826 {
1827 	return thread_stack__flush(thread);
1828 }
1829 
1830 static int perf_session__flush_thread_stacks(struct perf_session *session)
1831 {
1832 	return machines__for_each_thread(&session->machines,
1833 					 perf_session__flush_thread_stack,
1834 					 NULL);
1835 }
1836 
1837 volatile int session_done;
1838 
1839 static int __perf_session__process_decomp_events(struct perf_session *session);
1840 
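/*
 * A pipe cannot be mmapped and does not support lseek(), so events are read
 * into a heap buffer that is grown on demand: first the fixed-size header,
 * then the remainder of the record, until EOF.
 */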
1841 static int __perf_session__process_pipe_events(struct perf_session *session)
1842 {
1843 	struct ordered_events *oe = &session->ordered_events;
1844 	struct perf_tool *tool = session->tool;
1845 	int fd = perf_data__fd(session->data);
1846 	union perf_event *event;
1847 	uint32_t size, cur_size = 0;
1848 	void *buf = NULL;
1849 	s64 skip = 0;
1850 	u64 head;
1851 	ssize_t err;
1852 	void *p;
1853 
1854 	perf_tool__fill_defaults(tool);
1855 
1856 	head = 0;
1857 	cur_size = sizeof(union perf_event);
1858 
1859 	buf = malloc(cur_size);
1860 	if (!buf)
1861 		return -errno;
1862 	ordered_events__set_copy_on_queue(oe, true);
1863 more:
1864 	event = buf;
1865 	err = readn(fd, event, sizeof(struct perf_event_header));
1866 	if (err <= 0) {
1867 		if (err == 0)
1868 			goto done;
1869 
1870 		pr_err("failed to read event header\n");
1871 		goto out_err;
1872 	}
1873 
1874 	if (session->header.needs_swap)
1875 		perf_event_header__bswap(&event->header);
1876 
1877 	size = event->header.size;
1878 	if (size < sizeof(struct perf_event_header)) {
1879 		pr_err("bad event header size\n");
1880 		goto out_err;
1881 	}
1882 
1883 	if (size > cur_size) {
1884 		void *new = realloc(buf, size);
1885 		if (!new) {
1886 			pr_err("failed to allocate memory to read event\n");
1887 			goto out_err;
1888 		}
1889 		buf = new;
1890 		cur_size = size;
1891 		event = buf;
1892 	}
1893 	p = event;
1894 	p += sizeof(struct perf_event_header);
1895 
1896 	if (size - sizeof(struct perf_event_header)) {
1897 		err = readn(fd, p, size - sizeof(struct perf_event_header));
1898 		if (err <= 0) {
1899 			if (err == 0) {
1900 				pr_err("unexpected end of event stream\n");
1901 				goto done;
1902 			}
1903 
1904 			pr_err("failed to read event data\n");
1905 			goto out_err;
1906 		}
1907 	}
1908 
1909 	if ((skip = perf_session__process_event(session, event, head)) < 0) {
1910 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1911 		       head, event->header.size, event->header.type);
1912 		err = -EINVAL;
1913 		goto out_err;
1914 	}
1915 
1916 	head += size;
1917 
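	/*
	 * A positive 'skip' means the record is followed by that many extra
	 * bytes (e.g. an auxtrace data payload) which must also be consumed.
	 */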
1918 	if (skip > 0)
1919 		head += skip;
1920 
1921 	err = __perf_session__process_decomp_events(session);
1922 	if (err)
1923 		goto out_err;
1924 
1925 	if (!session_done())
1926 		goto more;
1927 done:
1928 	/* do the final flush for ordered samples */
1929 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1930 	if (err)
1931 		goto out_err;
1932 	err = auxtrace__flush_events(session, tool);
1933 	if (err)
1934 		goto out_err;
1935 	err = perf_session__flush_thread_stacks(session);
1936 out_err:
1937 	free(buf);
1938 	if (!tool->no_warn)
1939 		perf_session__warn_about_errors(session);
1940 	ordered_events__free(&session->ordered_events);
1941 	auxtrace__free_events(session);
1942 	return err;
1943 }
1944 
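/*
 * Return the event at 'head' in the mapped buffer, or NULL when not even an
 * event header fits before the end of the mapping (the caller remaps further
 * into the file). An ERR_PTR is returned when the header fits but its
 * declared size overruns the mapping, which indicates corrupt input.
 */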
1945 static union perf_event *
1946 fetch_mmaped_event(struct perf_session *session,
1947 		   u64 head, size_t mmap_size, char *buf)
1948 {
1949 	union perf_event *event;
1950 
	/*
	 * Ensure we have enough space remaining to read the size of
	 * the event in its header.
	 */
1955 	if (head + sizeof(event->header) > mmap_size)
1956 		return NULL;
1957 
1958 	event = (union perf_event *)(buf + head);
1959 
1960 	if (session->header.needs_swap)
1961 		perf_event_header__bswap(&event->header);
1962 
1963 	if (head + event->header.size > mmap_size) {
1964 		/* We're not fetching the event so swap back again */
1965 		if (session->header.needs_swap)
1966 			perf_event_header__bswap(&event->header);
		pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
			 __func__, head, event->header.size, mmap_size);
1969 		return ERR_PTR(-EINVAL);
1970 	}
1971 
1972 	return event;
1973 }
1974 
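/*
 * Walk the not-yet-consumed tail of the most recently decompressed buffer
 * and process each event it contains in place.
 */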
1975 static int __perf_session__process_decomp_events(struct perf_session *session)
1976 {
1977 	s64 skip;
1978 	u64 size, file_pos = 0;
1979 	struct decomp *decomp = session->decomp_last;
1980 
1981 	if (!decomp)
1982 		return 0;
1983 
1984 	while (decomp->head < decomp->size && !session_done()) {
1985 		union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1986 
1987 		if (IS_ERR(event))
1988 			return PTR_ERR(event);
1989 
1990 		if (!event)
1991 			break;
1992 
1993 		size = event->header.size;
1994 
1995 		if (size < sizeof(struct perf_event_header) ||
1996 		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1997 			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1998 				decomp->file_pos + decomp->head, event->header.size, event->header.type);
1999 			return -EINVAL;
2000 		}
2001 
2002 		if (skip)
2003 			size += skip;
2004 
2005 		decomp->head += size;
2006 	}
2007 
2008 	return 0;
2009 }
2010 
2011 /*
2012  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2013  * slices. On 32bit we use 32MB.
2014  */
2015 #if BITS_PER_LONG == 64
2016 #define MMAP_SIZE ULLONG_MAX
2017 #define NUM_MMAPS 1
2018 #else
2019 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2020 #define NUM_MMAPS 128
2021 #endif
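/* NUM_MMAPS must remain a power of two: reader__process_events() wraps map_idx with a bitmask */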
2022 
2023 struct reader;
2024 
2025 typedef s64 (*reader_cb_t)(struct perf_session *session,
2026 			   union perf_event *event,
2027 			   u64 file_offset);
2028 
2029 struct reader {
2030 	int		 fd;
2031 	u64		 data_size;
2032 	u64		 data_offset;
2033 	reader_cb_t	 process;
2034 };
2035 
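/*
 * Process the data area through a sliding window of NUM_MMAPS mappings:
 * map a chunk, deliver every complete event found in it, and when an event
 * straddles the end of the window, unmap the slot about to be reused and
 * remap from the current page-aligned offset.
 */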
2036 static int
2037 reader__process_events(struct reader *rd, struct perf_session *session,
2038 		       struct ui_progress *prog)
2039 {
2040 	u64 data_size = rd->data_size;
2041 	u64 head, page_offset, file_offset, file_pos, size;
2042 	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2043 	size_t	mmap_size;
2044 	char *buf, *mmaps[NUM_MMAPS];
2045 	union perf_event *event;
2046 	s64 skip;
2047 
2048 	page_offset = page_size * (rd->data_offset / page_size);
2049 	file_offset = page_offset;
2050 	head = rd->data_offset - page_offset;
2051 
2052 	ui_progress__init_size(prog, data_size, "Processing events...");
2053 
2054 	data_size += rd->data_offset;
2055 
2056 	mmap_size = MMAP_SIZE;
2057 	if (mmap_size > data_size) {
2058 		mmap_size = data_size;
2059 		session->one_mmap = true;
2060 	}
2061 
2062 	memset(mmaps, 0, sizeof(mmaps));
2063 
2064 	mmap_prot  = PROT_READ;
2065 	mmap_flags = MAP_SHARED;
2066 
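	/*
	 * Byte swapping is done in place on the mapped buffer, so the mapping
	 * must be writable and private (copy-on-write) to avoid touching the
	 * file itself.
	 */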
2067 	if (session->header.needs_swap) {
2068 		mmap_prot  |= PROT_WRITE;
2069 		mmap_flags = MAP_PRIVATE;
2070 	}
2071 remap:
2072 	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2073 		   file_offset);
2074 	if (buf == MAP_FAILED) {
2075 		pr_err("failed to mmap file\n");
2076 		err = -errno;
2077 		goto out;
2078 	}
2079 	mmaps[map_idx] = buf;
2080 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2081 	file_pos = file_offset + head;
2082 	if (session->one_mmap) {
2083 		session->one_mmap_addr = buf;
2084 		session->one_mmap_offset = file_offset;
2085 	}
2086 
2087 more:
2088 	event = fetch_mmaped_event(session, head, mmap_size, buf);
2089 	if (IS_ERR(event))
2090 		return PTR_ERR(event);
2091 
2092 	if (!event) {
2093 		if (mmaps[map_idx]) {
2094 			munmap(mmaps[map_idx], mmap_size);
2095 			mmaps[map_idx] = NULL;
2096 		}
2097 
2098 		page_offset = page_size * (head / page_size);
2099 		file_offset += page_offset;
2100 		head -= page_offset;
2101 		goto remap;
2102 	}
2103 
2104 	size = event->header.size;
2105 
2106 	skip = -EINVAL;
2107 
2108 	if (size < sizeof(struct perf_event_header) ||
2109 	    (skip = rd->process(session, event, file_pos)) < 0) {
2110 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2111 		       file_offset + head, event->header.size,
2112 		       event->header.type, strerror(-skip));
2113 		err = skip;
2114 		goto out;
2115 	}
2116 
2117 	if (skip)
2118 		size += skip;
2119 
2120 	head += size;
2121 	file_pos += size;
2122 
2123 	err = __perf_session__process_decomp_events(session);
2124 	if (err)
2125 		goto out;
2126 
2127 	ui_progress__update(prog, size);
2128 
2129 	if (session_done())
2130 		goto out;
2131 
2132 	if (file_pos < data_size)
2133 		goto more;
2134 
2135 out:
2136 	return err;
2137 }
2138 
2139 static s64 process_simple(struct perf_session *session,
2140 			  union perf_event *event,
2141 			  u64 file_offset)
2142 {
2143 	return perf_session__process_event(session, event, file_offset);
2144 }
2145 
2146 static int __perf_session__process_events(struct perf_session *session)
2147 {
2148 	struct reader rd = {
2149 		.fd		= perf_data__fd(session->data),
2150 		.data_size	= session->header.data_size,
2151 		.data_offset	= session->header.data_offset,
2152 		.process	= process_simple,
2153 	};
2154 	struct ordered_events *oe = &session->ordered_events;
2155 	struct perf_tool *tool = session->tool;
2156 	struct ui_progress prog;
2157 	int err;
2158 
2159 	perf_tool__fill_defaults(tool);
2160 
2161 	if (rd.data_size == 0)
2162 		return -1;
2163 
2164 	ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2165 
2166 	err = reader__process_events(&rd, session, &prog);
2167 	if (err)
2168 		goto out_err;
2169 	/* do the final flush for ordered samples */
2170 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2171 	if (err)
2172 		goto out_err;
2173 	err = auxtrace__flush_events(session, tool);
2174 	if (err)
2175 		goto out_err;
2176 	err = perf_session__flush_thread_stacks(session);
2177 out_err:
2178 	ui_progress__finish();
2179 	if (!tool->no_warn)
2180 		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output; make ordered_events
	 * reusable.
	 */
2185 	ordered_events__reinit(&session->ordered_events);
2186 	auxtrace__free_events(session);
2187 	session->one_mmap = false;
2188 	return err;
2189 }
2190 
2191 int perf_session__process_events(struct perf_session *session)
2192 {
2193 	if (perf_session__register_idle_thread(session) < 0)
2194 		return -ENOMEM;
2195 
2196 	if (perf_data__is_pipe(session->data))
2197 		return __perf_session__process_pipe_events(session);
2198 
2199 	return __perf_session__process_events(session);
2200 }
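
/*
 * Illustrative usage sketch (not part of this file): a caller typically
 * creates a session over a perf_data, processes all events, then deletes
 * it. Assumes this tree's ERR_PTR-returning perf_session__new():
 *
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	err = perf_session__process_events(session);
 *	perf_session__delete(session);
 */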
2201 
2202 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2203 {
2204 	struct evsel *evsel;
2205 
2206 	evlist__for_each_entry(session->evlist, evsel) {
2207 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2208 			return true;
2209 	}
2210 
2211 	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2212 	return false;
2213 }
2214 
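/*
 * Attach a reference/relocation symbol (name and address) to the map's kmap;
 * anything from the first ']' onward is stripped from the copied name.
 */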
2215 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2216 {
2217 	char *bracket;
2218 	struct ref_reloc_sym *ref;
2219 	struct kmap *kmap;
2220 
2221 	ref = zalloc(sizeof(struct ref_reloc_sym));
2222 	if (ref == NULL)
2223 		return -ENOMEM;
2224 
2225 	ref->name = strdup(symbol_name);
2226 	if (ref->name == NULL) {
2227 		free(ref);
2228 		return -ENOMEM;
2229 	}
2230 
2231 	bracket = strchr(ref->name, ']');
2232 	if (bracket)
2233 		*bracket = '\0';
2234 
2235 	ref->addr = addr;
2236 
2237 	kmap = map__kmap(map);
2238 	if (kmap)
2239 		kmap->ref_reloc_sym = ref;
2240 
2241 	return 0;
2242 }
2243 
2244 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2245 {
2246 	return machines__fprintf_dsos(&session->machines, fp);
2247 }
2248 
2249 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2250 					  bool (skip)(struct dso *dso, int parm), int parm)
2251 {
2252 	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2253 }
2254 
2255 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2256 {
2257 	size_t ret;
2258 	const char *msg = "";
2259 
2260 	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2261 		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2262 
2263 	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2264 
2265 	ret += events_stats__fprintf(&session->evlist->stats, fp);
2266 	return ret;
2267 }
2268 
2269 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2270 {
2271 	/*
2272 	 * FIXME: Here we have to actually print all the machines in this
2273 	 * session, not just the host...
2274 	 */
2275 	return machine__fprintf(&session->machines.host, fp);
2276 }
2277 
2278 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2279 					      unsigned int type)
2280 {
2281 	struct evsel *pos;
2282 
2283 	evlist__for_each_entry(session->evlist, pos) {
2284 		if (pos->core.attr.type == type)
2285 			return pos;
2286 	}
2287 	return NULL;
2288 }
2289 
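/*
 * Fill cpu_bitmap with the CPUs named in cpu_list, after checking that the
 * event types present in the session actually sampled the CPU
 * (PERF_SAMPLE_CPU).
 */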
2290 int perf_session__cpu_bitmap(struct perf_session *session,
2291 			     const char *cpu_list, unsigned long *cpu_bitmap)
2292 {
2293 	int i, err = -1;
2294 	struct perf_cpu_map *map;
2295 	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2296 
2297 	for (i = 0; i < PERF_TYPE_MAX; ++i) {
2298 		struct evsel *evsel;
2299 
2300 		evsel = perf_session__find_first_evtype(session, i);
2301 		if (!evsel)
2302 			continue;
2303 
2304 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2305 			pr_err("File does not contain CPU events. "
2306 			       "Remove -C option to proceed.\n");
2307 			return -1;
2308 		}
2309 	}
2310 
2311 	map = perf_cpu_map__new(cpu_list);
2312 	if (map == NULL) {
2313 		pr_err("Invalid cpu_list\n");
2314 		return -1;
2315 	}
2316 
2317 	for (i = 0; i < map->nr; i++) {
2318 		int cpu = map->map[i];
2319 
2320 		if (cpu >= nr_cpus) {
2321 			pr_err("Requested CPU %d too large. "
2322 			       "Consider raising MAX_NR_CPUS\n", cpu);
2323 			goto out_delete_map;
2324 		}
2325 
2326 		set_bit(cpu, cpu_bitmap);
2327 	}
2328 
2329 	err = 0;
2330 
2331 out_delete_map:
2332 	perf_cpu_map__put(map);
2333 	return err;
2334 }
2335 
2336 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2337 				bool full)
2338 {
2339 	if (session == NULL || fp == NULL)
2340 		return;
2341 
2342 	fprintf(fp, "# ========\n");
2343 	perf_header__fprintf_info(session, fp, full);
2344 	fprintf(fp, "# ========\n#\n");
2345 }
2346 
2347 
2348 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2349 					     const struct evsel_str_handler *assocs,
2350 					     size_t nr_assocs)
2351 {
2352 	struct evsel *evsel;
2353 	size_t i;
2354 	int err;
2355 
2356 	for (i = 0; i < nr_assocs; i++) {
		/*
		 * If a handler is being added for an event that is not in
		 * the session, just ignore it.
		 */
2361 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2362 		if (evsel == NULL)
2363 			continue;
2364 
2365 		err = -EEXIST;
2366 		if (evsel->handler != NULL)
2367 			goto out;
2368 		evsel->handler = assocs[i].handler;
2369 	}
2370 
2371 	err = 0;
2372 out:
2373 	return err;
2374 }
2375 
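/*
 * An id index maps each sample id to its (idx, cpu, tid) triple; the
 * advertised entry count is validated against the record size before the
 * entries are applied to the evlist's sample ids.
 */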
2376 int perf_event__process_id_index(struct perf_session *session,
2377 				 union perf_event *event)
2378 {
2379 	struct evlist *evlist = session->evlist;
2380 	struct perf_record_id_index *ie = &event->id_index;
2381 	size_t i, nr, max_nr;
2382 
2383 	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2384 		 sizeof(struct id_index_entry);
2385 	nr = ie->nr;
2386 	if (nr > max_nr)
2387 		return -EINVAL;
2388 
2389 	if (dump_trace)
2390 		fprintf(stdout, " nr: %zu\n", nr);
2391 
2392 	for (i = 0; i < nr; i++) {
2393 		struct id_index_entry *e = &ie->entries[i];
2394 		struct perf_sample_id *sid;
2395 
2396 		if (dump_trace) {
2397 			fprintf(stdout,	" ... id: %"PRI_lu64, e->id);
2398 			fprintf(stdout,	"  idx: %"PRI_lu64, e->idx);
2399 			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
2400 			fprintf(stdout,	"  tid: %"PRI_ld64"\n", e->tid);
2401 		}
2402 
2403 		sid = perf_evlist__id2sid(evlist, e->id);
2404 		if (!sid)
2405 			return -ENOENT;
2406 		sid->idx = e->idx;
2407 		sid->cpu = e->cpu;
2408 		sid->tid = e->tid;
2409 	}
2410 	return 0;
2411 }
2412 
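/*
 * Synthesize PERF_RECORD_ID_INDEX records covering every evsel id, splitting
 * into multiple records whenever the entry count would overflow the u16
 * size field in the event header.
 */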
2413 int perf_event__synthesize_id_index(struct perf_tool *tool,
2414 				    perf_event__handler_t process,
2415 				    struct evlist *evlist,
2416 				    struct machine *machine)
2417 {
2418 	union perf_event *ev;
2419 	struct evsel *evsel;
2420 	size_t nr = 0, i = 0, sz, max_nr, n;
2421 	int err;
2422 
2423 	pr_debug2("Synthesizing id index\n");
2424 
2425 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
2426 		 sizeof(struct id_index_entry);
2427 
2428 	evlist__for_each_entry(evlist, evsel)
2429 		nr += evsel->ids;
2430 
2431 	n = nr > max_nr ? max_nr : nr;
2432 	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
2433 	ev = zalloc(sz);
2434 	if (!ev)
2435 		return -ENOMEM;
2436 
2437 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2438 	ev->id_index.header.size = sz;
2439 	ev->id_index.nr = n;
2440 
2441 	evlist__for_each_entry(evlist, evsel) {
2442 		u32 j;
2443 
2444 		for (j = 0; j < evsel->ids; j++) {
2445 			struct id_index_entry *e;
2446 			struct perf_sample_id *sid;
2447 
2448 			if (i >= n) {
2449 				err = process(tool, ev, NULL, machine);
2450 				if (err)
2451 					goto out_err;
2452 				nr -= n;
2453 				i = 0;
2454 			}
2455 
2456 			e = &ev->id_index.entries[i++];
2457 
2458 			e->id = evsel->id[j];
2459 
2460 			sid = perf_evlist__id2sid(evlist, e->id);
2461 			if (!sid) {
2462 				free(ev);
2463 				return -ENOENT;
2464 			}
2465 
2466 			e->idx = sid->idx;
2467 			e->cpu = sid->cpu;
2468 			e->tid = sid->tid;
2469 		}
2470 	}
2471 
2472 	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
2473 	ev->id_index.header.size = sz;
2474 	ev->id_index.nr = nr;
2475 
2476 	err = process(tool, ev, NULL, machine);
2477 out_err:
2478 	free(ev);
2479 
2480 	return err;
2481 }
2482