/* tools/perf/util/session.c (revision cee75ac7) */
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
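
/*
 * Note on the ownership check above: files owned by root (st_uid == 0) or by
 * the current effective user are accepted as-is; anything else requires the
 * caller to pass 'force' (the tools expose this as e.g. 'perf report -f').
 */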

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}
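
/*
 * Typical lifecycle, as an illustrative sketch of the entry points in this
 * file (error handling trimmed; 'my_ops' stands for a caller-provided
 * struct perf_event_ops, see perf_event_ops__fill_defaults() below):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &my_ops);
 *	perf_session__delete(session);
 */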

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		/*
		 * Entries >= PERF_CONTEXT_MAX are not addresses but sentinel
		 * markers: they switch the cpumode used to resolve the
		 * following entries and are not themselves resolved.
		 */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}
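
/*
 * Example (sketch): chain->ips = { PERF_CONTEXT_KERNEL, k1, k2,
 * PERF_CONTEXT_USER, u1 } resolves k1 and k2 against the kernel maps and u1
 * against the task's user maps; the two context words produce no symbol and
 * leave their calloc()'ed slots zeroed.
 */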

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
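
/*
 * A tool only fills in the handlers it cares about; anything left NULL gets
 * a stub above. Hypothetical sketch ('my_ops' and 'my_process_sample' are
 * placeholders, not names from this codebase):
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample		 = my_process_sample,
 *		.ordered_samples = true,
 *	};
 *
 * With .ordered_samples set and .finished_round left NULL, the defaults wire
 * in process_finished_round() so queued samples are flushed in timestamp
 * order.
 */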

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
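
/*
 * Example: with byte_size == 16, the two u64 words are each byte-reversed in
 * place, e.g. 0x0123456789abcdef becomes 0xefcdab8967452301.
 */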

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}
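
/*
 * The tail of a HEADER_ATTR event after the fixed perf_event_attr fields is
 * an array of u64 ids, so it can be swapped wholesale. Illustrative numbers:
 * with header.size == 128 and attr.id starting 112 bytes into the event, the
 * remaining 16 bytes are two ids handed to mem_bswap_64().
 */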

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]                = event__mmap_swap,
	[PERF_RECORD_COMM]                = event__comm_swap,
	[PERF_RECORD_FORK]                = event__task_swap,
	[PERF_RECORD_EXIT]                = event__task_swap,
	[PERF_RECORD_LOST]                = event__all64_swap,
	[PERF_RECORD_READ]                = event__read_swap,
	[PERF_RECORD_SAMPLE]              = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]         = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]     = NULL,
	[PERF_RECORD_HEADER_MAX]          = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence, when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp very close to
	 * the last event inserted, unless we just switched to another event
	 * buffer. Sorting with a list, starting from the last inserted
	 * event when it is close to the current one, is probably more
	 * efficient than an rbtree-based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}
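
/*
 * Worked example (sketch): inserting timestamps 10, 11, 12 appends each one
 * after last_inserted, giving 10-11-12. A following 5 (say, after a buffer
 * switch) satisfies last_inserted->timestamp >= 5, so __queue_sample_before()
 * walks backwards and makes it the new head: 5-10-11-12.
 */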

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s);

	return 0;
}

static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

/*
 * Loop over read() until the full 'size' bytes have arrived, coping with
 * short reads on a pipe. Returns 'size' on success, or read()'s return
 * value (0 on end of file, negative on error) if the stream ends early.
 */
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}
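
/*
 * Window arithmetic example (illustrative numbers): with 4KiB pages and
 * mmap_window == 32, each mapping covers 128KiB. For data_offset == 0x1234,
 * shift == 0x1000, so the map starts at file offset 0x1000 and head begins
 * at 0x234, keeping the mmap() offset page-aligned.
 */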

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}