#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

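/*
 * When sample_id_all is set, non-sample events carry a trailing block of
 * sample fields (tid/pid, time, id, stream_id, cpu) selected by
 * sample_type.  Precompute the size of that trailer so event parsing
 * knows where each event's own payload ends.  The 'data' pointer is used
 * only inside sizeof() and is never dereferenced.
 */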
static void perf_session__id_header_size(struct perf_session *session)
{
	struct sample_data *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
	session->sample_id_all = value;
	perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
	session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
	self->sample_id_all = perf_header__sample_id_all(&self->header);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

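/*
 * A minimal usage sketch (assumptions: error handling elided and 'ops'
 * is a caller-provided struct perf_event_ops; callbacks left unset are
 * filled with stubs before processing starts):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session != NULL) {
 *		perf_session__process_events(session, &ops);
 *		perf_session__delete(session);
 *	}
 */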
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* fetch the successor before the erase rebalances the tree */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

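/*
 * Resolve each ip in the recorded callchain to a map/symbol pair.
 * Entries >= PERF_CONTEXT_MAX are not addresses but markers that switch
 * the cpumode (hypervisor/kernel/user) used to resolve the ips that
 * follow them.
 */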
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_synth_stub(event_t *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(event_t *event __used,
			      struct sample_data *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

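/*
 * Fill every callback the caller left NULL with a stub, so the dispatch
 * code never has to check for NULL handlers.  Lost events default to the
 * real event__process_lost so drops are still accounted, and
 * finished_round only gets a real handler when the caller asked for
 * ordered samples.
 */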
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

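/*
 * Byte-swap a buffer of u64s in place.  byte_size is expected to be a
 * multiple of sizeof(u64); a trailing partial word would be swapped as a
 * full u64 and overrun the buffer.
 */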
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

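	/* swap the variable-length id[] array that trails the attr itself */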
	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
	[PERF_RECORD_HEADER_MAX]    = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	event_t			*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

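/*
 * Deliver, in timestamp order, every queued event up to os->next_flush,
 * recycling the delivered sample_queue entries onto sample_cache.  The
 * last_sample hint is fixed up if the entry it pointed at was flushed.
 */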
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct sample_data sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		event__parse_sample(iter->event, s, &sample);
		perf_session_deliver_event(s, iter->event, &sample, ops,
					   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass over all of the buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, event_t *event,
				    struct sample_data *data, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
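		/*
		 * Element 0's embedded list node just linked this chunk
		 * into to_free, so it cannot hold a sample: hand out
		 * element 1 and start the allocation index at 2.
		 */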
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

641 
642 static void callchain__dump(struct sample_data *sample)
643 {
644 	unsigned int i;
645 
646 	if (!dump_trace)
647 		return;
648 
649 	printf("... chain: nr:%Lu\n", sample->callchain->nr);
650 
651 	for (i = 0; i < sample->callchain->nr; i++)
652 		printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
653 }
654 
static void perf_session__print_tstamp(struct perf_session *session,
				       event_t *event,
				       struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%Lu ", sample->time);
}

static void dump_event(struct perf_session *session, event_t *event,
		       u64 file_offset, struct sample_data *sample)
{
	if (!dump_trace)
		return;

	dump_printf("\n%#Lx [%#x]: event: %d\n", file_offset,
		    event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
		    file_offset, event->header.size,
		    event__get_event_name(event->header.type));
}

static void dump_sample(struct perf_session *session, event_t *event,
			struct sample_data *sample)
{
	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
		    sample->pid, sample->tid, sample->ip, sample->period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__dump(sample);
}

static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		return ops->sample(event, sample, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   event_t *event, struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, event_t *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct sample_data sample;
	int ret;

	/* bounds-check the type before using it to index event__swap_ops */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	event__parse_sample(event, session, &sample);

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

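/*
 * Loop over read() until 'size' bytes have arrived.  Returns 'size' on
 * success; on EOF or error it returns read()'s 0 or negative result even
 * if some bytes were already consumed, so callers treat anything <= 0 as
 * failure.
 */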
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

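/*
 * session_done is set asynchronously (e.g. from a signal handler) to ask
 * the processing loop to stop; the volatile cast forces a fresh load on
 * every check so the compiler cannot cache the flag in a register.
 */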
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

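/*
 * Pipe mode: the fd (typically stdin, e.g. fed by 'perf record -o -') is
 * neither seekable nor mmapable, so read each event header first and
 * then its payload.
 */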
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session_free_sample_buffers(self);
	return err;
}

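/*
 * File mode: walk the data area through mmap_window-sized mappings.  Up
 * to 8 mappings are kept alive and recycled round-robin, since with
 * ordered samples the queue still holds pointers into recently mapped
 * windows until they are flushed.
 */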
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	event_t *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

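	/*
	 * Byte-swapping is done in place, so when the file needs swapping
	 * map a private writable copy instead of scribbling on the file
	 * through a shared mapping.
	 */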
	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (event_t *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);

	if (ops->lost == event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}