xref: /openbmc/linux/tools/perf/util/session.c (revision 076c6e45)
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

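/*
 * Open the session input: stdin in pipe mode (filename "-"), otherwise a
 * regular perf.data file, validated for ownership, non-zero size and a
 * compatible header before use.
 */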
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		/* return the saved value: pr_err() may clobber errno */
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

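/*
 * Allocate and initialize a session. The filename is copied into space
 * co-allocated past the struct, hence the "sizeof(*self) + len" below.
 * A typical reader looks roughly like this (sketch, error handling
 * elided, 'my_ops' standing in for a caller-defined perf_event_ops):
 *
 *	struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *						    false, false);
 *	perf_session__process_events(s, &my_ops);
 *	perf_session__delete(s);
 */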
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

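/*
 * Threads parked on the dead_threads list by perf_session__remove_thread()
 * are only reclaimed here, at session teardown.
 */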
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

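/*
 * Tear down the live-threads rbtree. The successor must be fetched before
 * rb_erase(): an erased node's links are stale, so calling rb_next() on it
 * afterwards is undefined.
 */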
static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

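/*
 * Resolve each IP in a callchain to a map/symbol pair. PERF_CONTEXT_*
 * markers embedded in the chain switch the cpumode used for the lookups
 * that follow them. Returns a calloc'ed array of chain->nr entries owned
 * by the caller, or NULL on allocation failure.
 */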
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

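/*
 * Point every handler the caller left NULL at a no-op stub, so the
 * dispatcher can call through the ops table unconditionally.
 */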
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

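/*
 * Byte-swap a buffer as an array of u64s; byte_size is expected to be a
 * multiple of sizeof(u64).
 */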
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
	[PERF_RECORD_HEADER_MAX]    = NULL,
};

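/*
 * A copied event buffered for timestamp-ordered delivery, linked into
 * ordered_samples.samples_head in ascending timestamp order.
 */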
struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

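/*
 * Deliver and free every queued sample with a timestamp at or below
 * ordered_samples.next_flush, in order. A no-op when ordering is not
 * requested or before the first finished-round event sets a flush limit.
 */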
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

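/*
 * List-insertion helpers for the timestamp-ordered queue: scan backwards
 * from the tail, or backwards/forwards from the last inserted node.
 */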
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

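/*
 * Copy the event into a sample_queue node and insert it in timestamp
 * order, rejecting events that arrive below the last flush limit.
 */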
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}

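/*
 * Central dispatcher: byte-swap the event if the file was recorded on a
 * host of opposite endianness, then route it to the matching handler in
 * the ops table.
 */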
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	/* bound the type before indexing, lest a corrupt file reads OOB */
	if (self->header.needs_swap &&
	    event->header.type < PERF_RECORD_HEADER_MAX &&
	    event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

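/*
 * Read exactly 'size' bytes, looping over short reads. Returns the total
 * byte count on success, or read()'s 0 (EOF) / negative result if the
 * stream ends or fails early, in which case the partial count is lost.
 */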
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

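/*
 * Pipe mode: the input can be neither mmap'ed nor lseek'ed, so read one
 * event at a time from the fd, header first, then the rest of the record.
 */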
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

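/*
 * File mode: walk the data section through a sliding mmap window of
 * mmap_window pages, remapping whenever the next event would cross the
 * window's end. A byte-swapped file is mapped MAP_PRIVATE with PROT_WRITE
 * so the swapping can be done in place without touching the file.
 */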
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

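/*
 * Record the kallsyms reference relocation symbol (typically "_text" or
 * "_stext") and its address on every map type's kmap, so kernel maps can
 * later be relocated against the running kernel. Anything from the first
 * ']' onwards is trimmed from the copied name.
 */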
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}