xref: /openbmc/linux/tools/perf/util/session.c (revision 88ca895d)
1 #define _FILE_OFFSET_BITS 64
2 
3 #include <linux/kernel.h>
4 
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <sys/types.h>
8 #include <sys/mman.h>
9 
10 #include "session.h"
11 #include "sort.h"
12 #include "util.h"
13 
14 static int perf_session__open(struct perf_session *self, bool force)
15 {
16 	struct stat input_stat;
17 
18 	if (!strcmp(self->filename, "-")) {
19 		self->fd_pipe = true;
20 		self->fd = STDIN_FILENO;
21 
22 		if (perf_header__read(self, self->fd) < 0)
23 			pr_err("incompatible file format");
24 
25 		return 0;
26 	}
27 
28 	self->fd = open(self->filename, O_RDONLY);
29 	if (self->fd < 0) {
30 		int err = errno;
31 
32 		pr_err("failed to open %s: %s", self->filename, strerror(err));
33 		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
34 			pr_err("  (try 'perf record' first)");
35 		pr_err("\n");
36 		return -errno;
37 	}
38 
39 	if (fstat(self->fd, &input_stat) < 0)
40 		goto out_close;
41 
42 	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
43 		pr_err("file %s not owned by current user or root\n",
44 		       self->filename);
45 		goto out_close;
46 	}
47 
48 	if (!input_stat.st_size) {
49 		pr_info("zero-sized file (%s), nothing to do!\n",
50 			self->filename);
51 		goto out_close;
52 	}
53 
54 	if (perf_header__read(self, self->fd) < 0) {
55 		pr_err("incompatible file format");
56 		goto out_close;
57 	}
58 
59 	self->size = input_stat.st_size;
60 	return 0;
61 
62 out_close:
63 	close(self->fd);
64 	self->fd = -1;
65 	return -1;
66 }
67 
68 void perf_session__update_sample_type(struct perf_session *self)
69 {
70 	self->sample_type = perf_header__sample_type(&self->header);
71 }
72 
73 int perf_session__create_kernel_maps(struct perf_session *self)
74 {
75 	int ret = machine__create_kernel_maps(&self->host_machine);
76 
77 	if (ret >= 0)
78 		ret = machines__create_guest_kernel_maps(&self->machines);
79 	return ret;
80 }
81 
82 struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
83 {
84 	size_t len = filename ? strlen(filename) + 1 : 0;
85 	struct perf_session *self = zalloc(sizeof(*self) + len);
86 
87 	if (self == NULL)
88 		goto out;
89 
90 	if (perf_header__init(&self->header) < 0)
91 		goto out_free;
92 
93 	memcpy(self->filename, filename, len);
94 	self->threads = RB_ROOT;
95 	INIT_LIST_HEAD(&self->dead_threads);
96 	self->hists_tree = RB_ROOT;
97 	self->last_match = NULL;
98 	self->mmap_window = 32;
99 	self->machines = RB_ROOT;
100 	self->repipe = repipe;
101 	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
102 	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
103 
104 	if (mode == O_RDONLY) {
105 		if (perf_session__open(self, force) < 0)
106 			goto out_delete;
107 	} else if (mode == O_WRONLY) {
108 		/*
109 		 * In O_RDONLY mode this will be performed when reading the
110 		 * kernel MMAP event, in event__process_mmap().
111 		 */
112 		if (perf_session__create_kernel_maps(self) < 0)
113 			goto out_delete;
114 	}
115 
116 	perf_session__update_sample_type(self);
117 out:
118 	return self;
119 out_free:
120 	free(self);
121 	return NULL;
122 out_delete:
123 	perf_session__delete(self);
124 	return NULL;
125 }
126 
127 void perf_session__delete(struct perf_session *self)
128 {
129 	perf_header__exit(&self->header);
130 	close(self->fd);
131 	free(self);
132 }
133 
134 void perf_session__remove_thread(struct perf_session *self, struct thread *th)
135 {
136 	rb_erase(&th->rb_node, &self->threads);
137 	/*
138 	 * We may have references to this thread, for instance in some hist_entry
139 	 * instances, so just move them to a separate list.
140 	 */
141 	list_add_tail(&th->node, &self->dead_threads);
142 }
143 
144 static bool symbol__match_parent_regex(struct symbol *sym)
145 {
146 	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
147 		return 1;
148 
149 	return 0;
150 }
151 
152 struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
153 						   struct thread *thread,
154 						   struct ip_callchain *chain,
155 						   struct symbol **parent)
156 {
157 	u8 cpumode = PERF_RECORD_MISC_USER;
158 	unsigned int i;
159 	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
160 
161 	if (!syms)
162 		return NULL;
163 
164 	for (i = 0; i < chain->nr; i++) {
165 		u64 ip = chain->ips[i];
166 		struct addr_location al;
167 
168 		if (ip >= PERF_CONTEXT_MAX) {
169 			switch (ip) {
170 			case PERF_CONTEXT_HV:
171 				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
172 			case PERF_CONTEXT_KERNEL:
173 				cpumode = PERF_RECORD_MISC_KERNEL;	break;
174 			case PERF_CONTEXT_USER:
175 				cpumode = PERF_RECORD_MISC_USER;	break;
176 			default:
177 				break;
178 			}
179 			continue;
180 		}
181 
182 		al.filtered = false;
183 		thread__find_addr_location(thread, self, cpumode,
184 				MAP__FUNCTION, thread->pid, ip, &al, NULL);
185 		if (al.sym != NULL) {
186 			if (sort__has_parent && !*parent &&
187 			    symbol__match_parent_regex(al.sym))
188 				*parent = al.sym;
189 			if (!symbol_conf.use_callchain)
190 				break;
191 			syms[i].map = al.map;
192 			syms[i].sym = al.sym;
193 		}
194 	}
195 
196 	return syms;
197 }
198 
199 static int process_event_stub(event_t *event __used,
200 			      struct perf_session *session __used)
201 {
202 	dump_printf(": unhandled!\n");
203 	return 0;
204 }
205 
206 static int process_finished_round_stub(event_t *event __used,
207 				       struct perf_session *session __used,
208 				       struct perf_event_ops *ops __used)
209 {
210 	dump_printf(": unhandled!\n");
211 	return 0;
212 }
213 
214 static int process_finished_round(event_t *event,
215 				  struct perf_session *session,
216 				  struct perf_event_ops *ops);
217 
218 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
219 {
220 	if (handler->sample == NULL)
221 		handler->sample = process_event_stub;
222 	if (handler->mmap == NULL)
223 		handler->mmap = process_event_stub;
224 	if (handler->comm == NULL)
225 		handler->comm = process_event_stub;
226 	if (handler->fork == NULL)
227 		handler->fork = process_event_stub;
228 	if (handler->exit == NULL)
229 		handler->exit = process_event_stub;
230 	if (handler->lost == NULL)
231 		handler->lost = process_event_stub;
232 	if (handler->read == NULL)
233 		handler->read = process_event_stub;
234 	if (handler->throttle == NULL)
235 		handler->throttle = process_event_stub;
236 	if (handler->unthrottle == NULL)
237 		handler->unthrottle = process_event_stub;
238 	if (handler->attr == NULL)
239 		handler->attr = process_event_stub;
240 	if (handler->event_type == NULL)
241 		handler->event_type = process_event_stub;
242 	if (handler->tracing_data == NULL)
243 		handler->tracing_data = process_event_stub;
244 	if (handler->build_id == NULL)
245 		handler->build_id = process_event_stub;
246 	if (handler->finished_round == NULL) {
247 		if (handler->ordered_samples)
248 			handler->finished_round = process_finished_round;
249 		else
250 			handler->finished_round = process_finished_round_stub;
251 	}
252 }
253 
254 void mem_bswap_64(void *src, int byte_size)
255 {
256 	u64 *m = src;
257 
258 	while (byte_size > 0) {
259 		*m = bswap_64(*m);
260 		byte_size -= sizeof(u64);
261 		++m;
262 	}
263 }
264 
265 static void event__all64_swap(event_t *self)
266 {
267 	struct perf_event_header *hdr = &self->header;
268 	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
269 }
270 
271 static void event__comm_swap(event_t *self)
272 {
273 	self->comm.pid = bswap_32(self->comm.pid);
274 	self->comm.tid = bswap_32(self->comm.tid);
275 }
276 
277 static void event__mmap_swap(event_t *self)
278 {
279 	self->mmap.pid	 = bswap_32(self->mmap.pid);
280 	self->mmap.tid	 = bswap_32(self->mmap.tid);
281 	self->mmap.start = bswap_64(self->mmap.start);
282 	self->mmap.len	 = bswap_64(self->mmap.len);
283 	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
284 }
285 
286 static void event__task_swap(event_t *self)
287 {
288 	self->fork.pid	= bswap_32(self->fork.pid);
289 	self->fork.tid	= bswap_32(self->fork.tid);
290 	self->fork.ppid	= bswap_32(self->fork.ppid);
291 	self->fork.ptid	= bswap_32(self->fork.ptid);
292 	self->fork.time	= bswap_64(self->fork.time);
293 }
294 
295 static void event__read_swap(event_t *self)
296 {
297 	self->read.pid		= bswap_32(self->read.pid);
298 	self->read.tid		= bswap_32(self->read.tid);
299 	self->read.value	= bswap_64(self->read.value);
300 	self->read.time_enabled	= bswap_64(self->read.time_enabled);
301 	self->read.time_running	= bswap_64(self->read.time_running);
302 	self->read.id		= bswap_64(self->read.id);
303 }
304 
305 static void event__attr_swap(event_t *self)
306 {
307 	size_t size;
308 
309 	self->attr.attr.type		= bswap_32(self->attr.attr.type);
310 	self->attr.attr.size		= bswap_32(self->attr.attr.size);
311 	self->attr.attr.config		= bswap_64(self->attr.attr.config);
312 	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
313 	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
314 	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
315 	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
316 	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
317 	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
318 	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);
319 
320 	size = self->header.size;
321 	size -= (void *)&self->attr.id - (void *)self;
322 	mem_bswap_64(self->attr.id, size);
323 }
324 
325 static void event__event_type_swap(event_t *self)
326 {
327 	self->event_type.event_type.event_id =
328 		bswap_64(self->event_type.event_type.event_id);
329 }
330 
331 static void event__tracing_data_swap(event_t *self)
332 {
333 	self->tracing_data.size = bswap_32(self->tracing_data.size);
334 }
335 
336 typedef void (*event__swap_op)(event_t *self);
337 
338 static event__swap_op event__swap_ops[] = {
339 	[PERF_RECORD_MMAP]   = event__mmap_swap,
340 	[PERF_RECORD_COMM]   = event__comm_swap,
341 	[PERF_RECORD_FORK]   = event__task_swap,
342 	[PERF_RECORD_EXIT]   = event__task_swap,
343 	[PERF_RECORD_LOST]   = event__all64_swap,
344 	[PERF_RECORD_READ]   = event__read_swap,
345 	[PERF_RECORD_SAMPLE] = event__all64_swap,
346 	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
347 	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
348 	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
349 	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
350 	[PERF_RECORD_HEADER_MAX]    = NULL,
351 };
352 
353 struct sample_queue {
354 	u64			timestamp;
355 	struct sample_event	*event;
356 	struct list_head	list;
357 };
358 
359 static void flush_sample_queue(struct perf_session *s,
360 			       struct perf_event_ops *ops)
361 {
362 	struct list_head *head = &s->ordered_samples.samples_head;
363 	u64 limit = s->ordered_samples.next_flush;
364 	struct sample_queue *tmp, *iter;
365 
366 	if (!ops->ordered_samples || !limit)
367 		return;
368 
369 	list_for_each_entry_safe(iter, tmp, head, list) {
370 		if (iter->timestamp > limit)
371 			return;
372 
373 		if (iter == s->ordered_samples.last_inserted)
374 			s->ordered_samples.last_inserted = NULL;
375 
376 		ops->sample((event_t *)iter->event, s);
377 
378 		s->ordered_samples.last_flush = iter->timestamp;
379 		list_del(&iter->list);
380 		free(iter->event);
381 		free(iter);
382 	}
383 }
384 
385 /*
386  * When perf record finishes a pass on every buffers, it records this pseudo
387  * event.
388  * We record the max timestamp t found in the pass n.
389  * Assuming these timestamps are monotonic across cpus, we know that if
390  * a buffer still has events with timestamps below t, they will be all
391  * available and then read in the pass n + 1.
392  * Hence when we start to read the pass n + 2, we can safely flush every
393  * events with timestamps below t.
394  *
395  *    ============ PASS n =================
396  *       CPU 0         |   CPU 1
397  *                     |
398  *    cnt1 timestamps  |   cnt2 timestamps
399  *          1          |         2
400  *          2          |         3
401  *          -          |         4  <--- max recorded
402  *
403  *    ============ PASS n + 1 ==============
404  *       CPU 0         |   CPU 1
405  *                     |
406  *    cnt1 timestamps  |   cnt2 timestamps
407  *          3          |         5
408  *          4          |         6
409  *          5          |         7 <---- max recorded
410  *
411  *      Flush every events below timestamp 4
412  *
413  *    ============ PASS n + 2 ==============
414  *       CPU 0         |   CPU 1
415  *                     |
416  *    cnt1 timestamps  |   cnt2 timestamps
417  *          6          |         8
418  *          7          |         9
419  *          -          |         10
420  *
421  *      Flush every events below timestamp 7
422  *      etc...
423  */
424 static int process_finished_round(event_t *event __used,
425 				  struct perf_session *session,
426 				  struct perf_event_ops *ops)
427 {
428 	flush_sample_queue(session, ops);
429 	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
430 
431 	return 0;
432 }
433 
434 static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
435 {
436 	struct sample_queue *iter;
437 
438 	list_for_each_entry_reverse(iter, head, list) {
439 		if (iter->timestamp < new->timestamp) {
440 			list_add(&new->list, &iter->list);
441 			return;
442 		}
443 	}
444 
445 	list_add(&new->list, head);
446 }
447 
448 static void __queue_sample_before(struct sample_queue *new,
449 				  struct sample_queue *iter,
450 				  struct list_head *head)
451 {
452 	list_for_each_entry_continue_reverse(iter, head, list) {
453 		if (iter->timestamp < new->timestamp) {
454 			list_add(&new->list, &iter->list);
455 			return;
456 		}
457 	}
458 
459 	list_add(&new->list, head);
460 }
461 
462 static void __queue_sample_after(struct sample_queue *new,
463 				 struct sample_queue *iter,
464 				 struct list_head *head)
465 {
466 	list_for_each_entry_continue(iter, head, list) {
467 		if (iter->timestamp > new->timestamp) {
468 			list_add_tail(&new->list, &iter->list);
469 			return;
470 		}
471 	}
472 	list_add_tail(&new->list, head);
473 }
474 
475 /* The queue is ordered by time */
476 static void __queue_sample_event(struct sample_queue *new,
477 				 struct perf_session *s)
478 {
479 	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
480 	struct list_head *head = &s->ordered_samples.samples_head;
481 
482 
483 	if (!last_inserted) {
484 		__queue_sample_end(new, head);
485 		return;
486 	}
487 
488 	/*
489 	 * Most of the time the current event has a timestamp
490 	 * very close to the last event inserted, unless we just switched
491 	 * to another event buffer. Having a sorting based on a list and
492 	 * on the last inserted event that is close to the current one is
493 	 * probably more efficient than an rbtree based sorting.
494 	 */
495 	if (last_inserted->timestamp >= new->timestamp)
496 		__queue_sample_before(new, last_inserted, head);
497 	else
498 		__queue_sample_after(new, last_inserted, head);
499 }
500 
501 static int queue_sample_event(event_t *event, struct sample_data *data,
502 			      struct perf_session *s)
503 {
504 	u64 timestamp = data->time;
505 	struct sample_queue *new;
506 
507 
508 	if (timestamp < s->ordered_samples.last_flush) {
509 		printf("Warning: Timestamp below last timeslice flush\n");
510 		return -EINVAL;
511 	}
512 
513 	new = malloc(sizeof(*new));
514 	if (!new)
515 		return -ENOMEM;
516 
517 	new->timestamp = timestamp;
518 
519 	new->event = malloc(event->header.size);
520 	if (!new->event) {
521 		free(new);
522 		return -ENOMEM;
523 	}
524 
525 	memcpy(new->event, event, event->header.size);
526 
527 	__queue_sample_event(new, s);
528 	s->ordered_samples.last_inserted = new;
529 
530 	if (new->timestamp > s->ordered_samples.max_timestamp)
531 		s->ordered_samples.max_timestamp = new->timestamp;
532 
533 	return 0;
534 }
535 
536 static int perf_session__process_sample(event_t *event, struct perf_session *s,
537 					struct perf_event_ops *ops)
538 {
539 	struct sample_data data;
540 
541 	if (!ops->ordered_samples)
542 		return ops->sample(event, s);
543 
544 	bzero(&data, sizeof(struct sample_data));
545 	event__parse_sample(event, s->sample_type, &data);
546 
547 	queue_sample_event(event, &data, s);
548 
549 	return 0;
550 }
551 
552 static int perf_session__process_event(struct perf_session *self,
553 				       event_t *event,
554 				       struct perf_event_ops *ops,
555 				       u64 offset, u64 head)
556 {
557 	trace_event(event);
558 
559 	if (event->header.type < PERF_RECORD_HEADER_MAX) {
560 		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
561 			    offset + head, event->header.size,
562 			    event__name[event->header.type]);
563 		hists__inc_nr_events(&self->hists, event->header.type);
564 	}
565 
566 	if (self->header.needs_swap && event__swap_ops[event->header.type])
567 		event__swap_ops[event->header.type](event);
568 
569 	switch (event->header.type) {
570 	case PERF_RECORD_SAMPLE:
571 		return perf_session__process_sample(event, self, ops);
572 	case PERF_RECORD_MMAP:
573 		return ops->mmap(event, self);
574 	case PERF_RECORD_COMM:
575 		return ops->comm(event, self);
576 	case PERF_RECORD_FORK:
577 		return ops->fork(event, self);
578 	case PERF_RECORD_EXIT:
579 		return ops->exit(event, self);
580 	case PERF_RECORD_LOST:
581 		return ops->lost(event, self);
582 	case PERF_RECORD_READ:
583 		return ops->read(event, self);
584 	case PERF_RECORD_THROTTLE:
585 		return ops->throttle(event, self);
586 	case PERF_RECORD_UNTHROTTLE:
587 		return ops->unthrottle(event, self);
588 	case PERF_RECORD_HEADER_ATTR:
589 		return ops->attr(event, self);
590 	case PERF_RECORD_HEADER_EVENT_TYPE:
591 		return ops->event_type(event, self);
592 	case PERF_RECORD_HEADER_TRACING_DATA:
593 		/* setup for reading amidst mmap */
594 		lseek(self->fd, offset + head, SEEK_SET);
595 		return ops->tracing_data(event, self);
596 	case PERF_RECORD_HEADER_BUILD_ID:
597 		return ops->build_id(event, self);
598 	case PERF_RECORD_FINISHED_ROUND:
599 		return ops->finished_round(event, self, ops);
600 	default:
601 		++self->hists.stats.nr_unknown_events;
602 		return -1;
603 	}
604 }
605 
606 void perf_event_header__bswap(struct perf_event_header *self)
607 {
608 	self->type = bswap_32(self->type);
609 	self->misc = bswap_16(self->misc);
610 	self->size = bswap_16(self->size);
611 }
612 
613 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
614 {
615 	struct thread *thread = perf_session__findnew(self, 0);
616 
617 	if (thread == NULL || thread__set_comm(thread, "swapper")) {
618 		pr_err("problem inserting idle task.\n");
619 		thread = NULL;
620 	}
621 
622 	return thread;
623 }
624 
625 int do_read(int fd, void *buf, size_t size)
626 {
627 	void *buf_start = buf;
628 
629 	while (size) {
630 		int ret = read(fd, buf, size);
631 
632 		if (ret <= 0)
633 			return ret;
634 
635 		size -= ret;
636 		buf += ret;
637 	}
638 
639 	return buf - buf_start;
640 }
641 
642 #define session_done()	(*(volatile int *)(&session_done))
643 volatile int session_done;
644 
645 static int __perf_session__process_pipe_events(struct perf_session *self,
646 					       struct perf_event_ops *ops)
647 {
648 	event_t event;
649 	uint32_t size;
650 	int skip = 0;
651 	u64 head;
652 	int err;
653 	void *p;
654 
655 	perf_event_ops__fill_defaults(ops);
656 
657 	head = 0;
658 more:
659 	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
660 	if (err <= 0) {
661 		if (err == 0)
662 			goto done;
663 
664 		pr_err("failed to read event header\n");
665 		goto out_err;
666 	}
667 
668 	if (self->header.needs_swap)
669 		perf_event_header__bswap(&event.header);
670 
671 	size = event.header.size;
672 	if (size == 0)
673 		size = 8;
674 
675 	p = &event;
676 	p += sizeof(struct perf_event_header);
677 
678 	if (size - sizeof(struct perf_event_header)) {
679 		err = do_read(self->fd, p,
680 			      size - sizeof(struct perf_event_header));
681 		if (err <= 0) {
682 			if (err == 0) {
683 				pr_err("unexpected end of event stream\n");
684 				goto done;
685 			}
686 
687 			pr_err("failed to read event data\n");
688 			goto out_err;
689 		}
690 	}
691 
692 	if (size == 0 ||
693 	    (skip = perf_session__process_event(self, &event, ops,
694 						0, head)) < 0) {
695 		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
696 			    head, event.header.size, event.header.type);
697 		/*
698 		 * assume we lost track of the stream, check alignment, and
699 		 * increment a single u64 in the hope to catch on again 'soon'.
700 		 */
701 		if (unlikely(head & 7))
702 			head &= ~7ULL;
703 
704 		size = 8;
705 	}
706 
707 	head += size;
708 
709 	dump_printf("\n%#Lx [%#x]: event: %d\n",
710 		    head, event.header.size, event.header.type);
711 
712 	if (skip > 0)
713 		head += skip;
714 
715 	if (!session_done())
716 		goto more;
717 done:
718 	err = 0;
719 out_err:
720 	return err;
721 }
722 
723 int __perf_session__process_events(struct perf_session *self,
724 				   u64 data_offset, u64 data_size,
725 				   u64 file_size, struct perf_event_ops *ops)
726 {
727 	int err, mmap_prot, mmap_flags;
728 	u64 head, shift;
729 	u64 offset = 0;
730 	size_t	page_size;
731 	event_t *event;
732 	uint32_t size;
733 	char *buf;
734 	struct ui_progress *progress = ui_progress__new("Processing events...",
735 							self->size);
736 	if (progress == NULL)
737 		return -1;
738 
739 	perf_event_ops__fill_defaults(ops);
740 
741 	page_size = sysconf(_SC_PAGESIZE);
742 
743 	head = data_offset;
744 	shift = page_size * (head / page_size);
745 	offset += shift;
746 	head -= shift;
747 
748 	mmap_prot  = PROT_READ;
749 	mmap_flags = MAP_SHARED;
750 
751 	if (self->header.needs_swap) {
752 		mmap_prot  |= PROT_WRITE;
753 		mmap_flags = MAP_PRIVATE;
754 	}
755 remap:
756 	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
757 		   mmap_flags, self->fd, offset);
758 	if (buf == MAP_FAILED) {
759 		pr_err("failed to mmap file\n");
760 		err = -errno;
761 		goto out_err;
762 	}
763 
764 more:
765 	event = (event_t *)(buf + head);
766 	ui_progress__update(progress, offset);
767 
768 	if (self->header.needs_swap)
769 		perf_event_header__bswap(&event->header);
770 	size = event->header.size;
771 	if (size == 0)
772 		size = 8;
773 
774 	if (head + event->header.size >= page_size * self->mmap_window) {
775 		int munmap_ret;
776 
777 		shift = page_size * (head / page_size);
778 
779 		munmap_ret = munmap(buf, page_size * self->mmap_window);
780 		assert(munmap_ret == 0);
781 
782 		offset += shift;
783 		head -= shift;
784 		goto remap;
785 	}
786 
787 	size = event->header.size;
788 
789 	dump_printf("\n%#Lx [%#x]: event: %d\n",
790 		    offset + head, event->header.size, event->header.type);
791 
792 	if (size == 0 ||
793 	    perf_session__process_event(self, event, ops, offset, head) < 0) {
794 		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
795 			    offset + head, event->header.size,
796 			    event->header.type);
797 		/*
798 		 * assume we lost track of the stream, check alignment, and
799 		 * increment a single u64 in the hope to catch on again 'soon'.
800 		 */
801 		if (unlikely(head & 7))
802 			head &= ~7ULL;
803 
804 		size = 8;
805 	}
806 
807 	head += size;
808 
809 	if (offset + head >= data_offset + data_size)
810 		goto done;
811 
812 	if (offset + head < file_size)
813 		goto more;
814 done:
815 	err = 0;
816 	/* do the final flush for ordered samples */
817 	self->ordered_samples.next_flush = ULLONG_MAX;
818 	flush_sample_queue(self, ops);
819 out_err:
820 	ui_progress__delete(progress);
821 	return err;
822 }
823 
824 int perf_session__process_events(struct perf_session *self,
825 				 struct perf_event_ops *ops)
826 {
827 	int err;
828 
829 	if (perf_session__register_idle_thread(self) == NULL)
830 		return -ENOMEM;
831 
832 	if (!self->fd_pipe)
833 		err = __perf_session__process_events(self,
834 						     self->header.data_offset,
835 						     self->header.data_size,
836 						     self->size, ops);
837 	else
838 		err = __perf_session__process_pipe_events(self, ops);
839 
840 	return err;
841 }
842 
843 bool perf_session__has_traces(struct perf_session *self, const char *msg)
844 {
845 	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
846 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
847 		return false;
848 	}
849 
850 	return true;
851 }
852 
853 int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
854 					     const char *symbol_name,
855 					     u64 addr)
856 {
857 	char *bracket;
858 	enum map_type i;
859 	struct ref_reloc_sym *ref;
860 
861 	ref = zalloc(sizeof(struct ref_reloc_sym));
862 	if (ref == NULL)
863 		return -ENOMEM;
864 
865 	ref->name = strdup(symbol_name);
866 	if (ref->name == NULL) {
867 		free(ref);
868 		return -ENOMEM;
869 	}
870 
871 	bracket = strchr(ref->name, ']');
872 	if (bracket)
873 		*bracket = '\0';
874 
875 	ref->addr = addr;
876 
877 	for (i = 0; i < MAP__NR_TYPES; ++i) {
878 		struct kmap *kmap = map__kmap(maps[i]);
879 		kmap->ref_reloc_sym = ref;
880 	}
881 
882 	return 0;
883 }
884 
885 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
886 {
887 	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
888 	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
889 	       machines__fprintf_dsos(&self->machines, fp);
890 }
891 
892 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
893 					  bool with_hits)
894 {
895 	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
896 	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
897 }
898