xref: /openbmc/linux/tools/perf/util/session.c (revision 0f2c3de2)
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
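
/*
 * Example: the typical read-side lifecycle, as driven by the perf tools
 * (a minimal sketch; "my_ops" stands in for a caller-defined
 * perf_event_ops and error handling is reduced to a bail-out):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	if (perf_session__process_events(session, &my_ops) < 0)
 *		pr_err("failed to process events\n");
 *	perf_session__delete(session);
 */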

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}
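
/*
 * Example: consuming the array returned above (a sketch; the caller
 * owns the memory, and entries whose frame was a PERF_CONTEXT_* marker
 * or could not be resolved keep the NULL map/sym left by calloc()):
 *
 *	struct symbol *parent = NULL;
 *	struct map_symbol *syms;
 *	unsigned int i;
 *
 *	syms = perf_session__resolve_callchain(session, thread, chain, &parent);
 *	if (syms != NULL) {
 *		for (i = 0; i < chain->nr; i++)
 *			if (syms[i].sym != NULL)
 *				printf("%s\n", syms[i].sym->name);
 *		free(syms);
 *	}
 */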

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
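
/*
 * Example: thanks to the defaults above, a consumer only fills in the
 * handlers it cares about (a sketch; process_sample_event is a
 * hypothetical caller-supplied function):
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample		 = process_sample_event,
 *		.comm		 = event__process_comm,
 *		.ordered_samples = true,
 *	};
 *
 * Every other callback falls back to process_event_stub(), and
 * .finished_round is set to process_finished_round() because
 * .ordered_samples is true.
 */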

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = event__mmap_swap,
	[PERF_RECORD_COMM]		  = event__comm_swap,
	[PERF_RECORD_FORK]		  = event__task_swap,
	[PERF_RECORD_EXIT]		  = event__task_swap,
	[PERF_RECORD_LOST]		  = event__all64_swap,
	[PERF_RECORD_READ]		  = event__read_swap,
	[PERF_RECORD_SAMPLE]		  = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
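
/*
 * Example: how the table is used when a perf.data file was recorded on
 * a machine of the opposite endianness (a condensed sketch combining
 * the header swap done by the readers below with the payload swap done
 * in perf_session__process_event()):
 *
 *	if (session->header.needs_swap) {
 *		perf_event_header__bswap(&event->header);
 *		if (event->header.type < PERF_RECORD_HEADER_MAX &&
 *		    event__swap_ops[event->header.type])
 *			event__swap_ops[event->header.type](event);
 *	}
 */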

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp very close
	 * to that of the last event inserted, unless we just switched to
	 * another event buffer. Sorting on a list, starting the scan from
	 * the last inserted event, is therefore probably more efficient
	 * than an rbtree-based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}
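
/*
 * Example: with a queue holding timestamps 1 4 7 9 and last_inserted
 * pointing at 7, inserting 8 scans forward from 7 via
 * __queue_sample_after() and lands between 7 and 9, while inserting 5
 * scans backward from 7 via __queue_sample_before() and lands between
 * 4 and 7. Only when last_inserted is NULL (initially, or after a
 * flush) does the insertion fall back to a full reverse scan from the
 * tail in __queue_sample_end().
 */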

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}

static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	/*
	 * Bound the type before indexing the swap table, so that an
	 * unknown type cannot read past the end of event__swap_ops[].
	 */
	if (self->header.needs_swap &&
	    event->header.type < PERF_RECORD_HEADER_MAX &&
	    event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}
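
/*
 * Example: because do_read() loops until the requested size is
 * complete, a caller gets either the whole object or an error/EOF
 * (a sketch mirroring the pipe reader below; do_read() returns 0 on
 * EOF and a negative value on a read error):
 *
 *	struct perf_event_header hdr;
 *
 *	if (do_read(fd, &hdr, sizeof(hdr)) <= 0)
 *		return -1;
 */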

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size > sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
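
/*
 * Example: a typical caller hands in the host kernel maps together
 * with a reference symbol resolved from kallsyms (a sketch; "_text"
 * is the symbol perf record conventionally uses, and maps/addr are
 * assumed to come from the caller):
 *
 *	if (perf_session__set_kallsyms_ref_reloc_sym(maps, "_text", addr) < 0)
 *		pr_err("failed to set the kallsyms ref reloc symbol\n");
 */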

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
907