#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"

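/*
 * Rebuild the event selector list from the attributes stored in the
 * perf.data header, hashing each attribute's sample ids so that samples
 * read from the stream can be mapped back to the evsel that produced
 * them.
 */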
static int perf_session__read_evlist(struct perf_session *session)
{
	int i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < session->header.attrs; ++i) {
		struct perf_header_attr *hattr = session->header.attr[i];
		struct perf_evsel *evsel = perf_evsel__new(&hattr->attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, hattr->ids))
			goto out_delete_evlist;

		for (j = 0; j < hattr->ids; ++j)
			perf_evlist__id_hash(session->evlist, evsel, 0, j,
					     hattr->id[j]);
	}

	return 0;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		/*
		 * Return the saved errno: the pr_err calls above may have
		 * clobbered the global one.
		 */
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (perf_session__read_evlist(self) < 0) {
		pr_err("Not enough memory to read the event selector list\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}
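
/*
 * Example: with sample_id_all set and a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the trailer on
 * each non-sample event is pid/tid (2 * 4 bytes) + time (8 bytes) +
 * id (8 bytes), i.e. id_hdr_size = 24.
 */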

void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
	session->sample_id_all = value;
	perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
	session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
	self->sample_id_all = perf_header__sample_id_all(&self->header);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
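
/*
 * Typical lifecycle, sketched from the callers in the perf builtins
 * (the ops structure name and the error handling are illustrative):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false,
 *				    &event_ops);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &event_ops);
 *	perf_session__delete(session);
 */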

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

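/*
 * Resolve each ip in the callchain to a map/symbol. Entries at or above
 * PERF_CONTEXT_MAX are not addresses but context markers (kernel, user,
 * hypervisor) that switch the cpumode used to resolve the addresses
 * that follow them.
 */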
int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

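/*
 * Byte-swap a buffer of u64s in place. byte_size is expected to be a
 * multiple of sizeof(u64); e.g. a 16-byte buffer is swapped as two
 * consecutive 64-bit loads/stores.
 */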
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
	size_t size;

	event->attr.attr.type		= bswap_32(event->attr.attr.type);
	event->attr.attr.size		= bswap_32(event->attr.attr.size);
	event->attr.attr.config		= bswap_64(event->attr.attr.config);
	event->attr.attr.sample_period	= bswap_64(event->attr.attr.sample_period);
	event->attr.attr.sample_type	= bswap_64(event->attr.attr.sample_type);
	event->attr.attr.read_format	= bswap_64(event->attr.attr.read_format);
	event->attr.attr.wakeup_events	= bswap_32(event->attr.attr.wakeup_events);
	event->attr.attr.bp_type	= bswap_32(event->attr.attr.bp_type);
	event->attr.attr.bp_addr	= bswap_64(event->attr.attr.bp_addr);
	event->attr.attr.bp_len		= bswap_64(event->attr.attr.bp_len);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		perf_session__parse_sample(s, iter->event, &sample);
		perf_session_deliver_event(s, iter->event, &sample, ops,
					   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
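
/*
 * Example: with queued timestamps [10, 20, 30] and last_sample at 20,
 * inserting 25 walks forward one step and lands between 20 and 30,
 * while inserting 15 walks backward one step and lands between 10 and
 * 20. Events arriving in nearly-sorted order thus insert in O(1) on
 * average.
 */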

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

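/*
 * sample_queue entries are carved out of 64KB slabs. Slot 0 of each
 * slab is sacrificed to link the slab itself onto the to_free list
 * (hence sample_buffer_idx starting at 2 and the first entry handed
 * out being sample_buffer + 1); flushed entries are recycled through
 * the sample_cache list instead of being freed.
 */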
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		return ops->sample(event, sample, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

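/*
 * Central dispatch for a single event: byte-swap it if the file comes
 * from a host of the other endianness, account it in the stats, hand
 * synthesized user events straight to their handlers, and either queue
 * kernel events for time-ordered delivery or deliver them immediately.
 */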
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	/*
	 * Check the type before using it to index the swap table, so a
	 * corrupt file can't make us read past the end of the array.
	 */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	perf_session__parse_sample(session, event, &sample);

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
			    "!\n\nCheck IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

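/*
 * session_done is set asynchronously, typically from a signal handler
 * in the builtins; the volatile cast in the macro forces a fresh load
 * on every check of the pipe processing loop.
 */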
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}

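/*
 * Map the data file in large windows (the whole file on 64-bit) and
 * walk the events inside the current window. When an event would
 * straddle the window end, remap at a page-aligned offset just below
 * the current position; up to eight old windows are kept in the mmaps
 * ring before being recycled, so that events queued for ordered
 * delivery can still be read from an earlier mapping.
 */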
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size > mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}
1201