xref: /openbmc/linux/tools/perf/util/session.c (revision 28a6b6aa)
1 #define _FILE_OFFSET_BITS 64
2 
3 #include <linux/kernel.h>
4 
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <sys/types.h>
8 #include <sys/mman.h>
9 
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "session.h"
13 #include "tool.h"
14 #include "sort.h"
15 #include "util.h"
16 #include "cpumap.h"
17 #include "event-parse.h"
18 #include "perf_regs.h"
19 #include "vdso.h"
20 
21 static int perf_session__open(struct perf_session *self, bool force)
22 {
23 	struct stat input_stat;
24 
25 	if (!strcmp(self->filename, "-")) {
26 		self->fd_pipe = true;
27 		self->fd = STDIN_FILENO;
28 
29 		if (perf_session__read_header(self, self->fd) < 0)
30 			pr_err("incompatible file format (rerun with -v to learn more)");
31 
32 		return 0;
33 	}
34 
35 	self->fd = open(self->filename, O_RDONLY);
36 	if (self->fd < 0) {
37 		int err = errno;
38 
39 		pr_err("failed to open %s: %s", self->filename, strerror(err));
40 		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
41 			pr_err("  (try 'perf record' first)");
42 		pr_err("\n");
43 		return -err;
44 	}
45 
46 	if (fstat(self->fd, &input_stat) < 0)
47 		goto out_close;
48 
49 	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
50 		pr_err("file %s not owned by current user or root\n",
51 		       self->filename);
52 		goto out_close;
53 	}
54 
55 	if (!input_stat.st_size) {
56 		pr_info("zero-sized file (%s), nothing to do!\n",
57 			self->filename);
58 		goto out_close;
59 	}
60 
61 	if (perf_session__read_header(self, self->fd) < 0) {
62 		pr_err("incompatible file format (rerun with -v to learn more)");
63 		goto out_close;
64 	}
65 
66 	if (!perf_evlist__valid_sample_type(self->evlist)) {
67 		pr_err("non-matching sample_type");
68 		goto out_close;
69 	}
70 
71 	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
72 		pr_err("non-matching sample_id_all");
73 		goto out_close;
74 	}
75 
76 	self->size = input_stat.st_size;
77 	return 0;
78 
79 out_close:
80 	close(self->fd);
81 	self->fd = -1;
82 	return -1;
83 }
84 
85 void perf_session__set_id_hdr_size(struct perf_session *session)
86 {
87 	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
88 
89 	session->host_machine.id_hdr_size = id_hdr_size;
90 	machines__set_id_hdr_size(&session->machines, id_hdr_size);
91 }
92 
93 int perf_session__create_kernel_maps(struct perf_session *self)
94 {
95 	int ret = machine__create_kernel_maps(&self->host_machine);
96 
97 	if (ret >= 0)
98 		ret = machines__create_guest_kernel_maps(&self->machines);
99 	return ret;
100 }
101 
102 static void perf_session__destroy_kernel_maps(struct perf_session *self)
103 {
104 	machine__destroy_kernel_maps(&self->host_machine);
105 	machines__destroy_guest_kernel_maps(&self->machines);
106 }
107 
108 struct perf_session *perf_session__new(const char *filename, int mode,
109 				       bool force, bool repipe,
110 				       struct perf_tool *tool)
111 {
112 	struct perf_session *self;
113 	struct stat st;
114 	size_t len;
115 
116 	if (!filename || !strlen(filename)) {
117 		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
118 			filename = "-";
119 		else
120 			filename = "perf.data";
121 	}
122 
123 	len = strlen(filename);
124 	self = zalloc(sizeof(*self) + len);
125 
126 	if (self == NULL)
127 		goto out;
128 
129 	memcpy(self->filename, filename, len);
130 	self->machines = RB_ROOT;
131 	self->repipe = repipe;
132 	INIT_LIST_HEAD(&self->ordered_samples.samples);
133 	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
134 	INIT_LIST_HEAD(&self->ordered_samples.to_free);
135 	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
136 
137 	if (mode == O_RDONLY) {
138 		if (perf_session__open(self, force) < 0)
139 			goto out_delete;
140 		perf_session__set_id_hdr_size(self);
141 	} else if (mode == O_WRONLY) {
142 		/*
143 		 * In O_RDONLY mode this will be performed when reading the
144 		 * kernel MMAP event, in perf_event__process_mmap().
145 		 */
146 		if (perf_session__create_kernel_maps(self) < 0)
147 			goto out_delete;
148 	}
149 
150 	if (tool && tool->ordering_requires_timestamps &&
151 	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
152 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
153 		tool->ordered_samples = false;
154 	}
155 
156 out:
157 	return self;
158 out_delete:
159 	perf_session__delete(self);
160 	return NULL;
161 }
162 
163 static void perf_session__delete_dead_threads(struct perf_session *session)
164 {
165 	machine__delete_dead_threads(&session->host_machine);
166 }
167 
168 static void perf_session__delete_threads(struct perf_session *session)
169 {
170 	machine__delete_threads(&session->host_machine);
171 }
172 
173 static void perf_session_env__delete(struct perf_session_env *env)
174 {
175 	free(env->hostname);
176 	free(env->os_release);
177 	free(env->version);
178 	free(env->arch);
179 	free(env->cpu_desc);
180 	free(env->cpuid);
181 
182 	free(env->cmdline);
183 	free(env->sibling_cores);
184 	free(env->sibling_threads);
185 	free(env->numa_nodes);
186 	free(env->pmu_mappings);
187 }
188 
189 void perf_session__delete(struct perf_session *self)
190 {
191 	perf_session__destroy_kernel_maps(self);
192 	perf_session__delete_dead_threads(self);
193 	perf_session__delete_threads(self);
194 	perf_session_env__delete(&self->header.env);
195 	machine__exit(&self->host_machine);
196 	close(self->fd);
197 	free(self);
198 	vdso__exit();
199 }
200 
201 static int process_event_synth_tracing_data_stub(union perf_event *event
202 						 __maybe_unused,
203 						 struct perf_session *session
204 						__maybe_unused)
205 {
206 	dump_printf(": unhandled!\n");
207 	return 0;
208 }
209 
210 static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
211 					 struct perf_evlist **pevlist
212 					 __maybe_unused)
213 {
214 	dump_printf(": unhandled!\n");
215 	return 0;
216 }
217 
218 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
219 				     union perf_event *event __maybe_unused,
220 				     struct perf_sample *sample __maybe_unused,
221 				     struct perf_evsel *evsel __maybe_unused,
222 				     struct machine *machine __maybe_unused)
223 {
224 	dump_printf(": unhandled!\n");
225 	return 0;
226 }
227 
228 static int process_event_stub(struct perf_tool *tool __maybe_unused,
229 			      union perf_event *event __maybe_unused,
230 			      struct perf_sample *sample __maybe_unused,
231 			      struct machine *machine __maybe_unused)
232 {
233 	dump_printf(": unhandled!\n");
234 	return 0;
235 }
236 
237 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
238 				       union perf_event *event __maybe_unused,
239 				       struct perf_session *perf_session
240 				       __maybe_unused)
241 {
242 	dump_printf(": unhandled!\n");
243 	return 0;
244 }
245 
246 static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
247 				   union perf_event *event __maybe_unused)
248 {
249 	dump_printf(": unhandled!\n");
250 	return 0;
251 }
252 
253 static int process_finished_round(struct perf_tool *tool,
254 				  union perf_event *event,
255 				  struct perf_session *session);
256 
257 static void perf_tool__fill_defaults(struct perf_tool *tool)
258 {
259 	if (tool->sample == NULL)
260 		tool->sample = process_event_sample_stub;
261 	if (tool->mmap == NULL)
262 		tool->mmap = process_event_stub;
263 	if (tool->comm == NULL)
264 		tool->comm = process_event_stub;
265 	if (tool->fork == NULL)
266 		tool->fork = process_event_stub;
267 	if (tool->exit == NULL)
268 		tool->exit = process_event_stub;
269 	if (tool->lost == NULL)
270 		tool->lost = perf_event__process_lost;
271 	if (tool->read == NULL)
272 		tool->read = process_event_sample_stub;
273 	if (tool->throttle == NULL)
274 		tool->throttle = process_event_stub;
275 	if (tool->unthrottle == NULL)
276 		tool->unthrottle = process_event_stub;
277 	if (tool->attr == NULL)
278 		tool->attr = process_event_synth_attr_stub;
279 	if (tool->event_type == NULL)
280 		tool->event_type = process_event_type_stub;
281 	if (tool->tracing_data == NULL)
282 		tool->tracing_data = process_event_synth_tracing_data_stub;
283 	if (tool->build_id == NULL)
284 		tool->build_id = process_finished_round_stub;
285 	if (tool->finished_round == NULL) {
286 		if (tool->ordered_samples)
287 			tool->finished_round = process_finished_round;
288 		else
289 			tool->finished_round = process_finished_round_stub;
290 	}
291 }
292 
293 void mem_bswap_32(void *src, int byte_size)
294 {
295 	u32 *m = src;
296 	while (byte_size > 0) {
297 		*m = bswap_32(*m);
298 		byte_size -= sizeof(u32);
299 		++m;
300 	}
301 }
302 
303 void mem_bswap_64(void *src, int byte_size)
304 {
305 	u64 *m = src;
306 
307 	while (byte_size > 0) {
308 		*m = bswap_64(*m);
309 		byte_size -= sizeof(u64);
310 		++m;
311 	}
312 }
313 
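/*
 * Byte-swap the sample_id_all trailer that follows the event-specific data:
 * from 'data' to the end of the event it is treated as a sequence of u64
 * words (the size is asserted to be a multiple of 8).
 */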
314 static void swap_sample_id_all(union perf_event *event, void *data)
315 {
316 	void *end = (void *) event + event->header.size;
317 	int size = end - data;
318 
319 	BUG_ON(size % sizeof(u64));
320 	mem_bswap_64(data, size);
321 }
322 
323 static void perf_event__all64_swap(union perf_event *event,
324 				   bool sample_id_all __maybe_unused)
325 {
326 	struct perf_event_header *hdr = &event->header;
327 	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
328 }
329 
330 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
331 {
332 	event->comm.pid = bswap_32(event->comm.pid);
333 	event->comm.tid = bswap_32(event->comm.tid);
334 
335 	if (sample_id_all) {
336 		void *data = &event->comm.comm;
337 
338 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
339 		swap_sample_id_all(event, data);
340 	}
341 }
342 
343 static void perf_event__mmap_swap(union perf_event *event,
344 				  bool sample_id_all)
345 {
346 	event->mmap.pid	  = bswap_32(event->mmap.pid);
347 	event->mmap.tid	  = bswap_32(event->mmap.tid);
348 	event->mmap.start = bswap_64(event->mmap.start);
349 	event->mmap.len	  = bswap_64(event->mmap.len);
350 	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
351 
352 	if (sample_id_all) {
353 		void *data = &event->mmap.filename;
354 
355 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
356 		swap_sample_id_all(event, data);
357 	}
358 }
359 
360 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
361 {
362 	event->fork.pid	 = bswap_32(event->fork.pid);
363 	event->fork.tid	 = bswap_32(event->fork.tid);
364 	event->fork.ppid = bswap_32(event->fork.ppid);
365 	event->fork.ptid = bswap_32(event->fork.ptid);
366 	event->fork.time = bswap_64(event->fork.time);
367 
368 	if (sample_id_all)
369 		swap_sample_id_all(event, &event->fork + 1);
370 }
371 
372 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
373 {
374 	event->read.pid		 = bswap_32(event->read.pid);
375 	event->read.tid		 = bswap_32(event->read.tid);
376 	event->read.value	 = bswap_64(event->read.value);
377 	event->read.time_enabled = bswap_64(event->read.time_enabled);
378 	event->read.time_running = bswap_64(event->read.time_running);
379 	event->read.id		 = bswap_64(event->read.id);
380 
381 	if (sample_id_all)
382 		swap_sample_id_all(event, &event->read + 1);
383 }
384 
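/* Reverse the bit order within one byte: bit 0 <-> bit 7, bit 1 <-> bit 6, and so on. */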
385 static u8 revbyte(u8 b)
386 {
387 	int rev = (b >> 4) | ((b & 0xf) << 4);
388 	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
389 	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
390 	return (u8) rev;
391 }
392 
393 /*
394  * XXX this is a hack in an attempt to carry the flags bitfield
395  * through the endian village. The ABI says:
396  *
397  * Bit-fields are allocated from right to left (least to most significant)
398  * on little-endian implementations and from left to right (most to least
399  * significant) on big-endian implementations.
400  *
401  * The above seems to be byte specific, so we need to reverse each
402  * byte of the bitfield. 'Internet' also says this might be implementation
403  * specific and we probably need a proper fix and carry perf_event_attr
404  * bitfield flags in a separate data file FEAT_ section. Though this seems
405  * to work for now.
406  */
407 static void swap_bitfield(u8 *p, unsigned len)
408 {
409 	unsigned i;
410 
411 	for (i = 0; i < len; i++) {
412 		*p = revbyte(*p);
413 		p++;
414 	}
415 }
416 
417 /* exported for swapping attributes in file header */
418 void perf_event__attr_swap(struct perf_event_attr *attr)
419 {
420 	attr->type		= bswap_32(attr->type);
421 	attr->size		= bswap_32(attr->size);
422 	attr->config		= bswap_64(attr->config);
423 	attr->sample_period	= bswap_64(attr->sample_period);
424 	attr->sample_type	= bswap_64(attr->sample_type);
425 	attr->read_format	= bswap_64(attr->read_format);
426 	attr->wakeup_events	= bswap_32(attr->wakeup_events);
427 	attr->bp_type		= bswap_32(attr->bp_type);
428 	attr->bp_addr		= bswap_64(attr->bp_addr);
429 	attr->bp_len		= bswap_64(attr->bp_len);
430 
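	/*
	 * The bit-field flags (disabled, inherit, ...) occupy the u64 that
	 * immediately follows read_format; reverse each of its bytes as
	 * explained above swap_bitfield().
	 */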
431 	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
432 }
433 
434 static void perf_event__hdr_attr_swap(union perf_event *event,
435 				      bool sample_id_all __maybe_unused)
436 {
437 	size_t size;
438 
439 	perf_event__attr_swap(&event->attr.attr);
440 
441 	size = event->header.size;
442 	size -= (void *)&event->attr.id - (void *)event;
443 	mem_bswap_64(event->attr.id, size);
444 }
445 
446 static void perf_event__event_type_swap(union perf_event *event,
447 					bool sample_id_all __maybe_unused)
448 {
449 	event->event_type.event_type.event_id =
450 		bswap_64(event->event_type.event_type.event_id);
451 }
452 
453 static void perf_event__tracing_data_swap(union perf_event *event,
454 					  bool sample_id_all __maybe_unused)
455 {
456 	event->tracing_data.size = bswap_32(event->tracing_data.size);
457 }
458 
459 typedef void (*perf_event__swap_op)(union perf_event *event,
460 				    bool sample_id_all);
461 
462 static perf_event__swap_op perf_event__swap_ops[] = {
463 	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
464 	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
465 	[PERF_RECORD_FORK]		  = perf_event__task_swap,
466 	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
467 	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
468 	[PERF_RECORD_READ]		  = perf_event__read_swap,
469 	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
470 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
471 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
472 	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
473 	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
474 	[PERF_RECORD_HEADER_MAX]	  = NULL,
475 };
476 
477 struct sample_queue {
478 	u64			timestamp;
479 	u64			file_offset;
480 	union perf_event	*event;
481 	struct list_head	list;
482 };
483 
484 static void perf_session_free_sample_buffers(struct perf_session *session)
485 {
486 	struct ordered_samples *os = &session->ordered_samples;
487 
488 	while (!list_empty(&os->to_free)) {
489 		struct sample_queue *sq;
490 
491 		sq = list_entry(os->to_free.next, struct sample_queue, list);
492 		list_del(&sq->list);
493 		free(sq);
494 	}
495 }
496 
497 static int perf_session_deliver_event(struct perf_session *session,
498 				      union perf_event *event,
499 				      struct perf_sample *sample,
500 				      struct perf_tool *tool,
501 				      u64 file_offset);
502 
503 static int flush_sample_queue(struct perf_session *s,
504 			       struct perf_tool *tool)
505 {
506 	struct ordered_samples *os = &s->ordered_samples;
507 	struct list_head *head = &os->samples;
508 	struct sample_queue *tmp, *iter;
509 	struct perf_sample sample;
510 	u64 limit = os->next_flush;
511 	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
512 	unsigned idx = 0, progress_next = os->nr_samples / 16;
513 	int ret;
514 
515 	if (!tool->ordered_samples || !limit)
516 		return 0;
517 
518 	list_for_each_entry_safe(iter, tmp, head, list) {
519 		if (iter->timestamp > limit)
520 			break;
521 
522 		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
523 		if (ret)
524 			pr_err("Can't parse sample, err = %d\n", ret);
525 		else {
526 			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
527 							 iter->file_offset);
528 			if (ret)
529 				return ret;
530 		}
531 
532 		os->last_flush = iter->timestamp;
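		/*
		 * Recycle the entry: it moves from the sample queue to the
		 * sample_cache free list, where perf_session_queue_event()
		 * will pick it up again.
		 */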
533 		list_del(&iter->list);
534 		list_add(&iter->list, &os->sample_cache);
535 		if (++idx >= progress_next) {
536 			progress_next += os->nr_samples / 16;
537 			ui_progress__update(idx, os->nr_samples,
538 					    "Processing time ordered events...");
539 		}
540 	}
541 
542 	if (list_empty(head)) {
543 		os->last_sample = NULL;
544 	} else if (last_ts <= limit) {
545 		os->last_sample =
546 			list_entry(head->prev, struct sample_queue, list);
547 	}
548 
549 	os->nr_samples = 0;
550 
551 	return 0;
552 }
553 
554 /*
555  * When perf record finishes a pass over every buffer, it records this pseudo
556  * event.
557  * We record the max timestamp t found in the pass n.
558  * Assuming these timestamps are monotonic across cpus, we know that if
559  * a buffer still has events with timestamps below t, they will all be
560  * available and then read in the pass n + 1.
561  * Hence when we start to read the pass n + 2, we can safely flush all
562  * events with timestamps below t.
563  *
564  *    ============ PASS n =================
565  *       CPU 0         |   CPU 1
566  *                     |
567  *    cnt1 timestamps  |   cnt2 timestamps
568  *          1          |         2
569  *          2          |         3
570  *          -          |         4  <--- max recorded
571  *
572  *    ============ PASS n + 1 ==============
573  *       CPU 0         |   CPU 1
574  *                     |
575  *    cnt1 timestamps  |   cnt2 timestamps
576  *          3          |         5
577  *          4          |         6
578  *          5          |         7 <---- max recorded
579  *
580  *      Flush all events below timestamp 4
581  *
582  *    ============ PASS n + 2 ==============
583  *       CPU 0         |   CPU 1
584  *                     |
585  *    cnt1 timestamps  |   cnt2 timestamps
586  *          6          |         8
587  *          7          |         9
588  *          -          |         10
589  *
590  *      Flush all events below timestamp 7
591  *      etc...
592  */
593 static int process_finished_round(struct perf_tool *tool,
594 				  union perf_event *event __maybe_unused,
595 				  struct perf_session *session)
596 {
597 	int ret = flush_sample_queue(session, tool);
598 	if (!ret)
599 		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
600 
601 	return ret;
602 }
603 
604 /* The queue is ordered by time */
605 static void __queue_event(struct sample_queue *new, struct perf_session *s)
606 {
607 	struct ordered_samples *os = &s->ordered_samples;
608 	struct sample_queue *sample = os->last_sample;
609 	u64 timestamp = new->timestamp;
610 	struct list_head *p;
611 
612 	++os->nr_samples;
613 	os->last_sample = new;
614 
615 	if (!sample) {
616 		list_add(&new->list, &os->samples);
617 		os->max_timestamp = timestamp;
618 		return;
619 	}
620 
621 	/*
622 	 * last_sample might point to some random place in the list as it's
623 	 * the last queued event. We expect that the new event is close to
624 	 * this.
625 	 */
626 	if (sample->timestamp <= timestamp) {
627 		while (sample->timestamp <= timestamp) {
628 			p = sample->list.next;
629 			if (p == &os->samples) {
630 				list_add_tail(&new->list, &os->samples);
631 				os->max_timestamp = timestamp;
632 				return;
633 			}
634 			sample = list_entry(p, struct sample_queue, list);
635 		}
636 		list_add_tail(&new->list, &sample->list);
637 	} else {
638 		while (sample->timestamp > timestamp) {
639 			p = sample->list.prev;
640 			if (p == &os->samples) {
641 				list_add(&new->list, &os->samples);
642 				return;
643 			}
644 			sample = list_entry(p, struct sample_queue, list);
645 		}
646 		list_add(&new->list, &sample->list);
647 	}
648 }
649 
650 #define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))
651 
652 static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
653 				    struct perf_sample *sample, u64 file_offset)
654 {
655 	struct ordered_samples *os = &s->ordered_samples;
656 	struct list_head *sc = &os->sample_cache;
657 	u64 timestamp = sample->time;
658 	struct sample_queue *new;
659 
660 	if (!timestamp || timestamp == ~0ULL)
661 		return -ETIME;
662 
663 	if (timestamp < s->ordered_samples.last_flush) {
664 		printf("Warning: Timestamp below last timeslice flush\n");
665 		return -EINVAL;
666 	}
667 
668 	if (!list_empty(sc)) {
669 		new = list_entry(sc->next, struct sample_queue, list);
670 		list_del(&new->list);
671 	} else if (os->sample_buffer) {
672 		new = os->sample_buffer + os->sample_buffer_idx;
673 		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
674 			os->sample_buffer = NULL;
675 	} else {
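		/*
		 * Allocate a fresh chunk of MAX_SAMPLE_BUFFER entries.  Entry 0
		 * only serves to link the chunk onto the to_free list, so hand
		 * out entry 1 now and start the index at 2.
		 */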
676 		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
677 		if (!os->sample_buffer)
678 			return -ENOMEM;
679 		list_add(&os->sample_buffer->list, &os->to_free);
680 		os->sample_buffer_idx = 2;
681 		new = os->sample_buffer + 1;
682 	}
683 
684 	new->timestamp = timestamp;
685 	new->file_offset = file_offset;
686 	new->event = event;
687 
688 	__queue_event(new, s);
689 
690 	return 0;
691 }
692 
693 static void callchain__printf(struct perf_sample *sample)
694 {
695 	unsigned int i;
696 
697 	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
698 
699 	for (i = 0; i < sample->callchain->nr; i++)
700 		printf("..... %2d: %016" PRIx64 "\n",
701 		       i, sample->callchain->ips[i]);
702 }
703 
704 static void branch_stack__printf(struct perf_sample *sample)
705 {
706 	uint64_t i;
707 
708 	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
709 
710 	for (i = 0; i < sample->branch_stack->nr; i++)
711 		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
712 			i, sample->branch_stack->entries[i].from,
713 			sample->branch_stack->entries[i].to);
714 }
715 
716 static void regs_dump__printf(u64 mask, u64 *regs)
717 {
718 	unsigned rid, i = 0;
719 
720 	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
721 		u64 val = regs[i++];
722 
723 		printf(".... %-5s 0x%" PRIx64 "\n",
724 		       perf_reg_name(rid), val);
725 	}
726 }
727 
728 static void regs_user__printf(struct perf_sample *sample, u64 mask)
729 {
730 	struct regs_dump *user_regs = &sample->user_regs;
731 
732 	if (user_regs->regs) {
733 		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
734 		regs_dump__printf(mask, user_regs->regs);
735 	}
736 }
737 
738 static void stack_user__printf(struct stack_dump *dump)
739 {
740 	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
741 	       dump->size, dump->offset);
742 }
743 
744 static void perf_session__print_tstamp(struct perf_session *session,
745 				       union perf_event *event,
746 				       struct perf_sample *sample)
747 {
748 	u64 sample_type = perf_evlist__sample_type(session->evlist);
749 
750 	if (event->header.type != PERF_RECORD_SAMPLE &&
751 	    !perf_evlist__sample_id_all(session->evlist)) {
752 		fputs("-1 -1 ", stdout);
753 		return;
754 	}
755 
756 	if ((sample_type & PERF_SAMPLE_CPU))
757 		printf("%u ", sample->cpu);
758 
759 	if (sample_type & PERF_SAMPLE_TIME)
760 		printf("%" PRIu64 " ", sample->time);
761 }
762 
763 static void dump_event(struct perf_session *session, union perf_event *event,
764 		       u64 file_offset, struct perf_sample *sample)
765 {
766 	if (!dump_trace)
767 		return;
768 
769 	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
770 	       file_offset, event->header.size, event->header.type);
771 
772 	trace_event(event);
773 
774 	if (sample)
775 		perf_session__print_tstamp(session, event, sample);
776 
777 	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
778 	       event->header.size, perf_event__name(event->header.type));
779 }
780 
781 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
782 			struct perf_sample *sample)
783 {
784 	u64 sample_type;
785 
786 	if (!dump_trace)
787 		return;
788 
789 	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
790 	       event->header.misc, sample->pid, sample->tid, sample->ip,
791 	       sample->period, sample->addr);
792 
793 	sample_type = evsel->attr.sample_type;
794 
795 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
796 		callchain__printf(sample);
797 
798 	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
799 		branch_stack__printf(sample);
800 
801 	if (sample_type & PERF_SAMPLE_REGS_USER)
802 		regs_user__printf(sample, evsel->attr.sample_regs_user);
803 
804 	if (sample_type & PERF_SAMPLE_STACK_USER)
805 		stack_user__printf(&sample->user_stack);
806 }
807 
808 static struct machine *
809 	perf_session__find_machine_for_cpumode(struct perf_session *session,
810 					       union perf_event *event)
811 {
812 	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
813 
814 	if (perf_guest &&
815 	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
816 	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
817 		u32 pid;
818 
819 		if (event->header.type == PERF_RECORD_MMAP)
820 			pid = event->mmap.pid;
821 		else
822 			pid = event->ip.pid;
823 
824 		return perf_session__findnew_machine(session, pid);
825 	}
826 
827 	return perf_session__find_host_machine(session);
828 }
829 
830 static int perf_session_deliver_event(struct perf_session *session,
831 				      union perf_event *event,
832 				      struct perf_sample *sample,
833 				      struct perf_tool *tool,
834 				      u64 file_offset)
835 {
836 	struct perf_evsel *evsel;
837 	struct machine *machine;
838 
839 	dump_event(session, event, file_offset, sample);
840 
841 	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
842 	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
843 		/*
844 		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
845 		 * because the tools right now may apply filters, discarding
846 		 * some of the samples. For consistency, in the future we
847 		 * should have something like nr_filtered_samples and remove
848 		 * the sample->period from total_sample_period, etc. KISS for
849 		 * now though.
850 		 *
851 		 * Also testing against NULL allows us to handle files without
852 		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
853 		 * future it will probably be a good idea to restrict event
854 		 * processing via perf_session to files with both set.
855 		 */
856 		hists__inc_nr_events(&evsel->hists, event->header.type);
857 	}
858 
859 	machine = perf_session__find_machine_for_cpumode(session, event);
860 
861 	switch (event->header.type) {
862 	case PERF_RECORD_SAMPLE:
863 		dump_sample(evsel, event, sample);
864 		if (evsel == NULL) {
865 			++session->stats.nr_unknown_id;
866 			return 0;
867 		}
868 		if (machine == NULL) {
869 			++session->stats.nr_unprocessable_samples;
870 			return 0;
871 		}
872 		return tool->sample(tool, event, sample, evsel, machine);
873 	case PERF_RECORD_MMAP:
874 		return tool->mmap(tool, event, sample, machine);
875 	case PERF_RECORD_COMM:
876 		return tool->comm(tool, event, sample, machine);
877 	case PERF_RECORD_FORK:
878 		return tool->fork(tool, event, sample, machine);
879 	case PERF_RECORD_EXIT:
880 		return tool->exit(tool, event, sample, machine);
881 	case PERF_RECORD_LOST:
882 		if (tool->lost == perf_event__process_lost)
883 			session->stats.total_lost += event->lost.lost;
884 		return tool->lost(tool, event, sample, machine);
885 	case PERF_RECORD_READ:
886 		return tool->read(tool, event, sample, evsel, machine);
887 	case PERF_RECORD_THROTTLE:
888 		return tool->throttle(tool, event, sample, machine);
889 	case PERF_RECORD_UNTHROTTLE:
890 		return tool->unthrottle(tool, event, sample, machine);
891 	default:
892 		++session->stats.nr_unknown_events;
893 		return -1;
894 	}
895 }
896 
897 static int perf_session__preprocess_sample(struct perf_session *session,
898 					   union perf_event *event, struct perf_sample *sample)
899 {
900 	if (event->header.type != PERF_RECORD_SAMPLE ||
901 	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
902 		return 0;
903 
904 	if (!ip_callchain__valid(sample->callchain, event)) {
905 		pr_debug("call-chain problem with event, skipping it.\n");
906 		++session->stats.nr_invalid_chains;
907 		session->stats.total_invalid_chains += sample->period;
908 		return -EINVAL;
909 	}
910 	return 0;
911 }
912 
913 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
914 					    struct perf_tool *tool, u64 file_offset)
915 {
916 	int err;
917 
918 	dump_event(session, event, file_offset, NULL);
919 
920 	/* These events are processed right away */
921 	switch (event->header.type) {
922 	case PERF_RECORD_HEADER_ATTR:
923 		err = tool->attr(event, &session->evlist);
924 		if (err == 0)
925 			perf_session__set_id_hdr_size(session);
926 		return err;
927 	case PERF_RECORD_HEADER_EVENT_TYPE:
928 		return tool->event_type(tool, event);
929 	case PERF_RECORD_HEADER_TRACING_DATA:
930 		/* setup for reading amidst mmap */
931 		lseek(session->fd, file_offset, SEEK_SET);
932 		return tool->tracing_data(event, session);
933 	case PERF_RECORD_HEADER_BUILD_ID:
934 		return tool->build_id(tool, event, session);
935 	case PERF_RECORD_FINISHED_ROUND:
936 		return tool->finished_round(tool, event, session);
937 	default:
938 		return -EINVAL;
939 	}
940 }
941 
942 static void event_swap(union perf_event *event, bool sample_id_all)
943 {
944 	perf_event__swap_op swap;
945 
946 	swap = perf_event__swap_ops[event->header.type];
947 	if (swap)
948 		swap(event, sample_id_all);
949 }
950 
951 static int perf_session__process_event(struct perf_session *session,
952 				       union perf_event *event,
953 				       struct perf_tool *tool,
954 				       u64 file_offset)
955 {
956 	struct perf_sample sample;
957 	int ret;
958 
959 	if (session->header.needs_swap)
960 		event_swap(event, perf_evlist__sample_id_all(session->evlist));
961 
962 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
963 		return -EINVAL;
964 
965 	events_stats__inc(&session->stats, event->header.type);
966 
967 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
968 		return perf_session__process_user_event(session, event, tool, file_offset);
969 
970 	/*
971 	 * For all kernel events we get the sample data
972 	 */
973 	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
974 	if (ret)
975 		return ret;
976 
977 	/* Preprocess sample records - precheck callchains */
978 	if (perf_session__preprocess_sample(session, event, &sample))
979 		return 0;
980 
981 	if (tool->ordered_samples) {
982 		ret = perf_session_queue_event(session, event, &sample,
983 					       file_offset);
984 		if (ret != -ETIME)
985 			return ret;
986 	}
987 
988 	return perf_session_deliver_event(session, event, &sample, tool,
989 					  file_offset);
990 }
991 
992 void perf_event_header__bswap(struct perf_event_header *self)
993 {
994 	self->type = bswap_32(self->type);
995 	self->misc = bswap_16(self->misc);
996 	self->size = bswap_16(self->size);
997 }
998 
999 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1000 {
1001 	return machine__findnew_thread(&session->host_machine, pid);
1002 }
1003 
1004 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
1005 {
1006 	struct thread *thread = perf_session__findnew(self, 0);
1007 
1008 	if (thread == NULL || thread__set_comm(thread, "swapper")) {
1009 		pr_err("problem inserting idle task.\n");
1010 		thread = NULL;
1011 	}
1012 
1013 	return thread;
1014 }
1015 
1016 static void perf_session__warn_about_errors(const struct perf_session *session,
1017 					    const struct perf_tool *tool)
1018 {
1019 	if (tool->lost == perf_event__process_lost &&
1020 	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1021 		ui__warning("Processed %d events and lost %d chunks!\n\n"
1022 			    "Check IO/CPU overload!\n\n",
1023 			    session->stats.nr_events[0],
1024 			    session->stats.nr_events[PERF_RECORD_LOST]);
1025 	}
1026 
1027 	if (session->stats.nr_unknown_events != 0) {
1028 		ui__warning("Found %u unknown events!\n\n"
1029 			    "Is this an older tool processing a perf.data "
1030 			    "file generated by a more recent tool?\n\n"
1031 			    "If that is not the case, consider "
1032 			    "reporting to linux-kernel@vger.kernel.org.\n\n",
1033 			    session->stats.nr_unknown_events);
1034 	}
1035 
1036 	if (session->stats.nr_unknown_id != 0) {
1037 		ui__warning("%u samples with id not present in the header\n",
1038 			    session->stats.nr_unknown_id);
1039 	}
1040 
1041 	if (session->stats.nr_invalid_chains != 0) {
1042 		ui__warning("Found invalid callchains!\n\n"
1043 			    "%u out of %u events were discarded for this reason.\n\n"
1044 			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1045 			    session->stats.nr_invalid_chains,
1046 			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
1047 	}
1048 
1049 	if (session->stats.nr_unprocessable_samples != 0) {
1050 		ui__warning("%u unprocessable samples recorded.\n"
1051 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
1052 			    session->stats.nr_unprocessable_samples);
1053 	}
1054 }
1055 
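/*
 * session_done is read through a volatile pointer so the processing loops
 * below re-read it on every iteration instead of caching it in a register;
 * it can be set elsewhere to stop processing early.
 */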
1056 #define session_done()	(*(volatile int *)(&session_done))
1057 volatile int session_done;
1058 
1059 static int __perf_session__process_pipe_events(struct perf_session *self,
1060 					       struct perf_tool *tool)
1061 {
1062 	union perf_event *event;
1063 	uint32_t size, cur_size = 0;
1064 	void *buf = NULL;
1065 	int skip = 0;
1066 	u64 head;
1067 	int err;
1068 	void *p;
1069 
1070 	perf_tool__fill_defaults(tool);
1071 
1072 	head = 0;
1073 	cur_size = sizeof(union perf_event);
1074 
1075 	buf = malloc(cur_size);
1076 	if (!buf)
1077 		return -errno;
1078 more:
1079 	event = buf;
1080 	err = readn(self->fd, event, sizeof(struct perf_event_header));
1081 	if (err <= 0) {
1082 		if (err == 0)
1083 			goto done;
1084 
1085 		pr_err("failed to read event header\n");
1086 		goto out_err;
1087 	}
1088 
1089 	if (self->header.needs_swap)
1090 		perf_event_header__bswap(&event->header);
1091 
1092 	size = event->header.size;
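	/*
	 * A corrupt zero-sized header would stall the loop, so fall back to
	 * the bare size of a perf_event_header (8 bytes) and keep advancing
	 * through the stream.
	 */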
1093 	if (size == 0)
1094 		size = 8;
1095 
1096 	if (size > cur_size) {
1097 		void *new = realloc(buf, size);
1098 		if (!new) {
1099 			pr_err("failed to allocate memory to read event\n");
1100 			goto out_err;
1101 		}
1102 		buf = new;
1103 		cur_size = size;
1104 		event = buf;
1105 	}
1106 	p = event;
1107 	p += sizeof(struct perf_event_header);
1108 
1109 	if (size - sizeof(struct perf_event_header)) {
1110 		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
1111 		if (err <= 0) {
1112 			if (err == 0) {
1113 				pr_err("unexpected end of event stream\n");
1114 				goto done;
1115 			}
1116 
1117 			pr_err("failed to read event data\n");
1118 			goto out_err;
1119 		}
1120 	}
1121 
1122 	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
1123 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1124 		       head, event->header.size, event->header.type);
1125 		err = -EINVAL;
1126 		goto out_err;
1127 	}
1128 
1129 	head += size;
1130 
1131 	if (skip > 0)
1132 		head += skip;
1133 
1134 	if (!session_done())
1135 		goto more;
1136 done:
1137 	err = 0;
1138 out_err:
1139 	free(buf);
1140 	perf_session__warn_about_errors(self, tool);
1141 	perf_session_free_sample_buffers(self);
1142 	return err;
1143 }
1144 
1145 static union perf_event *
1146 fetch_mmaped_event(struct perf_session *session,
1147 		   u64 head, size_t mmap_size, char *buf)
1148 {
1149 	union perf_event *event;
1150 
1151 	/*
1152 	 * Ensure we have enough space remaining to read
1153 	 * the size of the event in the headers.
1154 	 */
1155 	if (head + sizeof(event->header) > mmap_size)
1156 		return NULL;
1157 
1158 	event = (union perf_event *)(buf + head);
1159 
1160 	if (session->header.needs_swap)
1161 		perf_event_header__bswap(&event->header);
1162 
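	/*
	 * The whole event must lie within this mmap window; if it spills past
	 * the end, return NULL so the caller remaps at a new file offset.
	 */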
1163 	if (head + event->header.size > mmap_size)
1164 		return NULL;
1165 
1166 	return event;
1167 }
1168 
1169 /*
1170  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1171  * slices. On 32bit we use 32MB.
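 * Several windows (NUM_MMAPS) are kept mapped at a time because, with
 * ordered samples, queued events may still point into earlier windows
 * until they are flushed.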
1172  */
1173 #if BITS_PER_LONG == 64
1174 #define MMAP_SIZE ULLONG_MAX
1175 #define NUM_MMAPS 1
1176 #else
1177 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1178 #define NUM_MMAPS 128
1179 #endif
1180 
1181 int __perf_session__process_events(struct perf_session *session,
1182 				   u64 data_offset, u64 data_size,
1183 				   u64 file_size, struct perf_tool *tool)
1184 {
1185 	u64 head, page_offset, file_offset, file_pos, progress_next;
1186 	int err, mmap_prot, mmap_flags, map_idx = 0;
1187 	size_t	mmap_size;
1188 	char *buf, *mmaps[NUM_MMAPS];
1189 	union perf_event *event;
1190 	uint32_t size;
1191 
1192 	perf_tool__fill_defaults(tool);
1193 
1194 	page_offset = page_size * (data_offset / page_size);
1195 	file_offset = page_offset;
1196 	head = data_offset - page_offset;
1197 
1198 	if (data_offset + data_size < file_size)
1199 		file_size = data_offset + data_size;
1200 
1201 	progress_next = file_size / 16;
1202 
1203 	mmap_size = MMAP_SIZE;
1204 	if (mmap_size > file_size)
1205 		mmap_size = file_size;
1206 
1207 	memset(mmaps, 0, sizeof(mmaps));
1208 
1209 	mmap_prot  = PROT_READ;
1210 	mmap_flags = MAP_SHARED;
1211 
1212 	if (session->header.needs_swap) {
1213 		mmap_prot  |= PROT_WRITE;
1214 		mmap_flags = MAP_PRIVATE;
1215 	}
1216 remap:
1217 	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
1218 		   file_offset);
1219 	if (buf == MAP_FAILED) {
1220 		pr_err("failed to mmap file\n");
1221 		err = -errno;
1222 		goto out_err;
1223 	}
1224 	mmaps[map_idx] = buf;
1225 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1226 	file_pos = file_offset + head;
1227 
1228 more:
1229 	event = fetch_mmaped_event(session, head, mmap_size, buf);
1230 	if (!event) {
1231 		if (mmaps[map_idx]) {
1232 			munmap(mmaps[map_idx], mmap_size);
1233 			mmaps[map_idx] = NULL;
1234 		}
1235 
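		/*
		 * Realign the next window on the page boundary at or below
		 * head: mmap() file offsets must be page aligned.
		 */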
1236 		page_offset = page_size * (head / page_size);
1237 		file_offset += page_offset;
1238 		head -= page_offset;
1239 		goto remap;
1240 	}
1241 
1242 	size = event->header.size;
1243 
1244 	if (size == 0 ||
1245 	    perf_session__process_event(session, event, tool, file_pos) < 0) {
1246 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1247 		       file_offset + head, event->header.size,
1248 		       event->header.type);
1249 		err = -EINVAL;
1250 		goto out_err;
1251 	}
1252 
1253 	head += size;
1254 	file_pos += size;
1255 
1256 	if (file_pos >= progress_next) {
1257 		progress_next += file_size / 16;
1258 		ui_progress__update(file_pos, file_size,
1259 				    "Processing events...");
1260 	}
1261 
1262 	if (file_pos < file_size)
1263 		goto more;
1264 
1265 	err = 0;
1266 	/* do the final flush for ordered samples */
1267 	session->ordered_samples.next_flush = ULLONG_MAX;
1268 	err = flush_sample_queue(session, tool);
1269 out_err:
1270 	ui_progress__finish();
1271 	perf_session__warn_about_errors(session, tool);
1272 	perf_session_free_sample_buffers(session);
1273 	return err;
1274 }
1275 
1276 int perf_session__process_events(struct perf_session *self,
1277 				 struct perf_tool *tool)
1278 {
1279 	int err;
1280 
1281 	if (perf_session__register_idle_thread(self) == NULL)
1282 		return -ENOMEM;
1283 
1284 	if (!self->fd_pipe)
1285 		err = __perf_session__process_events(self,
1286 						     self->header.data_offset,
1287 						     self->header.data_size,
1288 						     self->size, tool);
1289 	else
1290 		err = __perf_session__process_pipe_events(self, tool);
1291 
1292 	return err;
1293 }
1294 
1295 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1296 {
1297 	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
1298 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1299 		return false;
1300 	}
1301 
1302 	return true;
1303 }
1304 
1305 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1306 				     const char *symbol_name, u64 addr)
1307 {
1308 	char *bracket;
1309 	enum map_type i;
1310 	struct ref_reloc_sym *ref;
1311 
1312 	ref = zalloc(sizeof(struct ref_reloc_sym));
1313 	if (ref == NULL)
1314 		return -ENOMEM;
1315 
1316 	ref->name = strdup(symbol_name);
1317 	if (ref->name == NULL) {
1318 		free(ref);
1319 		return -ENOMEM;
1320 	}
1321 
1322 	bracket = strchr(ref->name, ']');
1323 	if (bracket)
1324 		*bracket = '\0';
1325 
1326 	ref->addr = addr;
1327 
1328 	for (i = 0; i < MAP__NR_TYPES; ++i) {
1329 		struct kmap *kmap = map__kmap(maps[i]);
1330 		kmap->ref_reloc_sym = ref;
1331 	}
1332 
1333 	return 0;
1334 }
1335 
1336 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
1337 {
1338 	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
1339 	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
1340 	       machines__fprintf_dsos(&self->machines, fp);
1341 }
1342 
1343 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
1344 					  bool (skip)(struct dso *dso, int parm), int parm)
1345 {
1346 	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm);
1347 	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
1348 }
1349 
1350 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1351 {
1352 	struct perf_evsel *pos;
1353 	size_t ret = fprintf(fp, "Aggregated stats:\n");
1354 
1355 	ret += events_stats__fprintf(&session->stats, fp);
1356 
1357 	list_for_each_entry(pos, &session->evlist->entries, node) {
1358 		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1359 		ret += events_stats__fprintf(&pos->hists.stats, fp);
1360 	}
1361 
1362 	return ret;
1363 }
1364 
1365 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1366 {
1367 	/*
1368 	 * FIXME: Here we have to actually print all the machines in this
1369 	 * session, not just the host...
1370 	 */
1371 	return machine__fprintf(&session->host_machine, fp);
1372 }
1373 
1374 void perf_session__remove_thread(struct perf_session *session,
1375 				 struct thread *th)
1376 {
1377 	/*
1378 	 * FIXME: This one makes no sense, we need to remove the thread from
1379 	 * the machine it belongs to, perf_session can have many machines, so
1380 	 * doing it always on ->host_machine is wrong.  Fix when auditing all
1381 	 * the 'perf kvm' code.
1382 	 */
1383 	machine__remove_thread(&session->host_machine, th);
1384 }
1385 
1386 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1387 					      unsigned int type)
1388 {
1389 	struct perf_evsel *pos;
1390 
1391 	list_for_each_entry(pos, &session->evlist->entries, node) {
1392 		if (pos->attr.type == type)
1393 			return pos;
1394 	}
1395 	return NULL;
1396 }
1397 
1398 void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
1399 			  struct perf_sample *sample, struct machine *machine,
1400 			  int print_sym, int print_dso, int print_symoffset)
1401 {
1402 	struct addr_location al;
1403 	struct callchain_cursor_node *node;
1404 
1405 	if (perf_event__preprocess_sample(event, machine, &al, sample,
1406 					  NULL) < 0) {
1407 		error("problem processing %d event, skipping it.\n",
1408 			event->header.type);
1409 		return;
1410 	}
1411 
1412 	if (symbol_conf.use_callchain && sample->callchain) {
1413 
1414 
1415 		if (machine__resolve_callchain(machine, evsel, al.thread,
1416 					       sample, NULL) != 0) {
1417 			if (verbose)
1418 				error("Failed to resolve callchain. Skipping\n");
1419 			return;
1420 		}
1421 		callchain_cursor_commit(&callchain_cursor);
1422 
1423 		while (1) {
1424 			node = callchain_cursor_current(&callchain_cursor);
1425 			if (!node)
1426 				break;
1427 
1428 			printf("\t%16" PRIx64, node->ip);
1429 			if (print_sym) {
1430 				printf(" ");
1431 				symbol__fprintf_symname(node->sym, stdout);
1432 			}
1433 			if (print_dso) {
1434 				printf(" (");
1435 				map__fprintf_dsoname(node->map, stdout);
1436 				printf(")");
1437 			}
1438 			printf("\n");
1439 
1440 			callchain_cursor_advance(&callchain_cursor);
1441 		}
1442 
1443 	} else {
1444 		printf("%16" PRIx64, sample->ip);
1445 		if (print_sym) {
1446 			printf(" ");
1447 			if (print_symoffset)
1448 				symbol__fprintf_symname_offs(al.sym, &al,
1449 							     stdout);
1450 			else
1451 				symbol__fprintf_symname(al.sym, stdout);
1452 		}
1453 
1454 		if (print_dso) {
1455 			printf(" (");
1456 			map__fprintf_dsoname(al.map, stdout);
1457 			printf(")");
1458 		}
1459 	}
1460 }
1461 
1462 int perf_session__cpu_bitmap(struct perf_session *session,
1463 			     const char *cpu_list, unsigned long *cpu_bitmap)
1464 {
1465 	int i;
1466 	struct cpu_map *map;
1467 
1468 	for (i = 0; i < PERF_TYPE_MAX; ++i) {
1469 		struct perf_evsel *evsel;
1470 
1471 		evsel = perf_session__find_first_evtype(session, i);
1472 		if (!evsel)
1473 			continue;
1474 
1475 		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1476 			pr_err("File does not contain CPU events. "
1477 			       "Remove -c option to proceed.\n");
1478 			return -1;
1479 		}
1480 	}
1481 
1482 	map = cpu_map__new(cpu_list);
1483 	if (map == NULL) {
1484 		pr_err("Invalid cpu_list\n");
1485 		return -1;
1486 	}
1487 
1488 	for (i = 0; i < map->nr; i++) {
1489 		int cpu = map->map[i];
1490 
1491 		if (cpu >= MAX_NR_CPUS) {
1492 			pr_err("Requested CPU %d too large. "
1493 			       "Consider raising MAX_NR_CPUS\n", cpu);
1494 			return -1;
1495 		}
1496 
1497 		set_bit(cpu, cpu_bitmap);
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1504 				bool full)
1505 {
1506 	struct stat st;
1507 	int ret;
1508 
1509 	if (session == NULL || fp == NULL)
1510 		return;
1511 
1512 	ret = fstat(session->fd, &st);
1513 	if (ret == -1)
1514 		return;
1515 
1516 	fprintf(fp, "# ========\n");
1517 	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1518 	perf_header__fprintf_info(session, fp, full);
1519 	fprintf(fp, "# ========\n#\n");
1520 }
1521 
1522 
1523 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1524 					     const struct perf_evsel_str_handler *assocs,
1525 					     size_t nr_assocs)
1526 {
1527 	struct perf_evlist *evlist = session->evlist;
1528 	struct event_format *format;
1529 	struct perf_evsel *evsel;
1530 	char *tracepoint, *name;
1531 	size_t i;
1532 	int err;
1533 
1534 	for (i = 0; i < nr_assocs; i++) {
1535 		err = -ENOMEM;
1536 		tracepoint = strdup(assocs[i].name);
1537 		if (tracepoint == NULL)
1538 			goto out;
1539 
1540 		err = -ENOENT;
1541 		name = strchr(tracepoint, ':');
1542 		if (name == NULL)
1543 			goto out_free;
1544 
1545 		*name++ = '\0';
1546 		format = pevent_find_event_by_name(session->pevent,
1547 						   tracepoint, name);
1548 		if (format == NULL) {
1549 			/*
1550 			 * Adding a handler for an event not in this session;
1551 			 * just ignore it.
1552 			 */
1553 			goto next;
1554 		}
1555 
1556 		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
1557 		if (evsel == NULL)
1558 			goto next;
1559 
1560 		err = -EEXIST;
1561 		if (evsel->handler.func != NULL)
1562 			goto out_free;
1563 		evsel->handler.func = assocs[i].handler;
1564 next:
1565 		free(tracepoint);
1566 	}
1567 
1568 	err = 0;
1569 out:
1570 	return err;
1571 
1572 out_free:
1573 	free(tracepoint);
1574 	goto out;
1575 }
1576