xref: /openbmc/linux/tools/perf/util/session.c (revision ba21594c)
1 #include <linux/kernel.h>
2 
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <sys/types.h>
6 
7 #include "session.h"
8 #include "sort.h"
9 #include "util.h"
10 
11 static int perf_session__open(struct perf_session *self, bool force)
12 {
13 	struct stat input_stat;
14 
15 	self->fd = open(self->filename, O_RDONLY);
16 	if (self->fd < 0) {
17 		pr_err("failed to open file: %s", self->filename);
18 		if (!strcmp(self->filename, "perf.data"))
19 			pr_err("  (try 'perf record' first)");
20 		pr_err("\n");
21 		return -errno;
22 	}
23 
24 	if (fstat(self->fd, &input_stat) < 0)
25 		goto out_close;
26 
27 	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
28 		pr_err("file %s not owned by current user or root\n",
29 		       self->filename);
30 		goto out_close;
31 	}
32 
33 	if (!input_stat.st_size) {
34 		pr_info("zero-sized file (%s), nothing to do!\n",
35 			self->filename);
36 		goto out_close;
37 	}
38 
39 	if (perf_header__read(&self->header, self->fd) < 0) {
40 		pr_err("incompatible file format");
41 		goto out_close;
42 	}
43 
44 	self->size = input_stat.st_size;
45 	return 0;
46 
47 out_close:
48 	close(self->fd);
49 	self->fd = -1;
50 	return -1;
51 }
52 
53 struct perf_session *perf_session__new(const char *filename, int mode, bool force)
54 {
55 	size_t len = filename ? strlen(filename) + 1 : 0;
56 	struct perf_session *self = zalloc(sizeof(*self) + len);
57 
58 	if (self == NULL)
59 		goto out;
60 
61 	if (perf_header__init(&self->header) < 0)
62 		goto out_free;
63 
64 	memcpy(self->filename, filename, len);
65 	self->threads = RB_ROOT;
66 	self->last_match = NULL;
67 	self->mmap_window = 32;
68 	self->cwd = NULL;
69 	self->cwdlen = 0;
70 	self->unknown_events = 0;
71 	map_groups__init(&self->kmaps);
72 
73 	if (mode == O_RDONLY && perf_session__open(self, force) < 0)
74 		goto out_delete;
75 
76 	self->sample_type = perf_header__sample_type(&self->header);
77 out:
78 	return self;
79 out_free:
80 	free(self);
81 	return NULL;
82 out_delete:
83 	perf_session__delete(self);
84 	return NULL;
85 }
86 
/*
 * Tear down a session created by perf_session__new(): release header
 * resources, close the data file and free owned strings before freeing
 * the session itself.
 */
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);	/* fd may be -1 if never opened; close then fails harmlessly */
	free(self->cwd);	/* free(NULL) is a no-op */
	free(self);
}
94 
95 static bool symbol__match_parent_regex(struct symbol *sym)
96 {
97 	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
98 		return 1;
99 
100 	return 0;
101 }
102 
103 struct symbol **perf_session__resolve_callchain(struct perf_session *self,
104 						struct thread *thread,
105 						struct ip_callchain *chain,
106 						struct symbol **parent)
107 {
108 	u8 cpumode = PERF_RECORD_MISC_USER;
109 	struct symbol **syms = NULL;
110 	unsigned int i;
111 
112 	if (symbol_conf.use_callchain) {
113 		syms = calloc(chain->nr, sizeof(*syms));
114 		if (!syms) {
115 			fprintf(stderr, "Can't allocate memory for symbols\n");
116 			exit(-1);
117 		}
118 	}
119 
120 	for (i = 0; i < chain->nr; i++) {
121 		u64 ip = chain->ips[i];
122 		struct addr_location al;
123 
124 		if (ip >= PERF_CONTEXT_MAX) {
125 			switch (ip) {
126 			case PERF_CONTEXT_HV:
127 				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
128 			case PERF_CONTEXT_KERNEL:
129 				cpumode = PERF_RECORD_MISC_KERNEL;	break;
130 			case PERF_CONTEXT_USER:
131 				cpumode = PERF_RECORD_MISC_USER;	break;
132 			default:
133 				break;
134 			}
135 			continue;
136 		}
137 
138 		thread__find_addr_location(thread, self, cpumode,
139 					   MAP__FUNCTION, ip, &al, NULL);
140 		if (al.sym != NULL) {
141 			if (sort__has_parent && !*parent &&
142 			    symbol__match_parent_regex(al.sym))
143 				*parent = al.sym;
144 			if (!symbol_conf.use_callchain)
145 				break;
146 			syms[i] = al.sym;
147 		}
148 	}
149 
150 	return syms;
151 }
152 
/*
 * Default no-op handler installed for any callback the caller left NULL;
 * just notes the event as unhandled in debug output and reports success.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
159 
160 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
161 {
162 	if (handler->sample == NULL)
163 		handler->sample = process_event_stub;
164 	if (handler->mmap == NULL)
165 		handler->mmap = process_event_stub;
166 	if (handler->comm == NULL)
167 		handler->comm = process_event_stub;
168 	if (handler->fork == NULL)
169 		handler->fork = process_event_stub;
170 	if (handler->exit == NULL)
171 		handler->exit = process_event_stub;
172 	if (handler->lost == NULL)
173 		handler->lost = process_event_stub;
174 	if (handler->read == NULL)
175 		handler->read = process_event_stub;
176 	if (handler->throttle == NULL)
177 		handler->throttle = process_event_stub;
178 	if (handler->unthrottle == NULL)
179 		handler->unthrottle = process_event_stub;
180 }
181 
/* Human-readable names for the PERF_RECORD_* types, indexed by type;
 * slot 0 doubles as the "TOTAL" row used by event__print_totals(). */
static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};
194 
/* Per-type event counters; index 0 accumulates the grand total. */
unsigned long event__total[PERF_RECORD_MAX];

/* Dump the per-type event counters collected while processing events. */
void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}
204 
205 void mem_bswap_64(void *src, int byte_size)
206 {
207 	u64 *m = src;
208 
209 	while (byte_size > 0) {
210 		*m = bswap_64(*m);
211 		byte_size -= sizeof(u64);
212 		++m;
213 	}
214 }
215 
216 static void event__all64_swap(event_t *self)
217 {
218 	struct perf_event_header *hdr = &self->header;
219 	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
220 }
221 
/* Byte-swap the 32-bit fields of a PERF_RECORD_COMM event (the comm
 * string itself is byte data and needs no swapping). */
static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}
227 
/* Byte-swap the numeric fields of a PERF_RECORD_MMAP event (the mapped
 * filename that follows is byte data and needs no swapping). */
static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}
236 
/* Byte-swap a task lifecycle event; shared by PERF_RECORD_FORK and
 * PERF_RECORD_EXIT, which use the same record layout. */
static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}
245 
/* Byte-swap all fields of a PERF_RECORD_READ event. */
static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}
255 
typedef void (*event__swap_op)(event_t *self);

/* Per-type byte-swap handlers for cross-endian data files, indexed by
 * PERF_RECORD_* type.  NULL slots (e.g. THROTTLE/UNTHROTTLE here) get no
 * body swapping beyond the already-swapped header. */
static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};
268 
269 static int perf_session__process_event(struct perf_session *self,
270 				       event_t *event,
271 				       struct perf_event_ops *ops,
272 				       u64 offset, u64 head)
273 {
274 	trace_event(event);
275 
276 	if (event->header.type < PERF_RECORD_MAX) {
277 		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
278 			    offset + head, event->header.size,
279 			    event__name[event->header.type]);
280 		++event__total[0];
281 		++event__total[event->header.type];
282 	}
283 
284 	if (self->header.needs_swap && event__swap_ops[event->header.type])
285 		event__swap_ops[event->header.type](event);
286 
287 	switch (event->header.type) {
288 	case PERF_RECORD_SAMPLE:
289 		return ops->sample(event, self);
290 	case PERF_RECORD_MMAP:
291 		return ops->mmap(event, self);
292 	case PERF_RECORD_COMM:
293 		return ops->comm(event, self);
294 	case PERF_RECORD_FORK:
295 		return ops->fork(event, self);
296 	case PERF_RECORD_EXIT:
297 		return ops->exit(event, self);
298 	case PERF_RECORD_LOST:
299 		return ops->lost(event, self);
300 	case PERF_RECORD_READ:
301 		return ops->read(event, self);
302 	case PERF_RECORD_THROTTLE:
303 		return ops->throttle(event, self);
304 	case PERF_RECORD_UNTHROTTLE:
305 		return ops->unthrottle(event, self);
306 	default:
307 		self->unknown_events++;
308 		return -1;
309 	}
310 }
311 
/* Byte-swap the common perf event header (type/misc/size) in place. */
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}
318 
319 int perf_header__read_build_ids(struct perf_header *self,
320 				int input, u64 offset, u64 size)
321 {
322 	struct build_id_event bev;
323 	char filename[PATH_MAX];
324 	u64 limit = offset + size;
325 	int err = -1;
326 
327 	while (offset < limit) {
328 		struct dso *dso;
329 		ssize_t len;
330 		struct list_head *head = &dsos__user;
331 
332 		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
333 			goto out;
334 
335 		if (self->needs_swap)
336 			perf_event_header__bswap(&bev.header);
337 
338 		len = bev.header.size - sizeof(bev);
339 		if (read(input, filename, len) != len)
340 			goto out;
341 
342 		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
343 			head = &dsos__kernel;
344 
345 		dso = __dsos__findnew(head, filename);
346 		if (dso != NULL) {
347 			dso__set_build_id(dso, &bev.build_id);
348 			if (head == &dsos__kernel && filename[0] == '[')
349 				dso->kernel = 1;
350 		}
351 
352 		offset += bev.header.size;
353 	}
354 	err = 0;
355 out:
356 	return err;
357 }
358 
359 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
360 {
361 	struct thread *thread = perf_session__findnew(self, 0);
362 
363 	if (thread == NULL || thread__set_comm(thread, "swapper")) {
364 		pr_err("problem inserting idle task.\n");
365 		thread = NULL;
366 	}
367 
368 	return thread;
369 }
370 
/*
 * Main event loop: mmap the data file a window at a time and feed every
 * record between header.data_offset and data_offset + data_size to the
 * callbacks in 'ops'.  Returns 0 on success or a negative errno-style
 * value on setup failure.
 */
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	/* Thread 0 ("swapper") must exist before samples can be attributed. */
	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	/* Replace any NULL callbacks with no-op stubs. */
	perf_event_ops__fill_defaults(ops);

	page_size = getpagesize();

	/* 'offset' is the (page-aligned) file position of the current mmap
	 * window; 'head' is the record position relative to that window. */
	head = self->header.data_offset;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
			/* also entered backwards from the strdup failure below */
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	/* Align the initial mapping offset down to a page boundary and fold
	 * the remainder into 'head'. */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	/* Byte-swapping is done in place, so a writable private (COW)
	 * mapping is needed for cross-endian files. */
	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;	/* minimum record granularity for broken headers */

	/* Record would straddle the window's end: slide the window forward
	 * by whole pages and map again. */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	/* Re-read size: it may have been forced to 8 above, but the
	 * zero-size case is handled explicitly below. */
	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	/* Stop at the end of the data section ... */
	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	/* ... and never read past the end of the file. */
	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}
482 
483 bool perf_session__has_traces(struct perf_session *self, const char *msg)
484 {
485 	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
486 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
487 		return false;
488 	}
489 
490 	return true;
491 }
492 
493 int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
494 					     const char *symbol_name,
495 					     u64 addr)
496 {
497 	char *bracket;
498 
499 	self->ref_reloc_sym.name = strdup(symbol_name);
500 	if (self->ref_reloc_sym.name == NULL)
501 		return -ENOMEM;
502 
503 	bracket = strchr(self->ref_reloc_sym.name, ']');
504 	if (bracket)
505 		*bracket = '\0';
506 
507 	self->ref_reloc_sym.addr = addr;
508 	return 0;
509 }
510 
/* map_ip replacement installed after relocation is detected: here
 * map->pgoff holds the signed relocation delta, not a file offset. */
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}
515 
/* Inverse of map__reloc_map_ip: undo the relocation delta stored in
 * map->pgoff. */
static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}
520 
/*
 * Adjust the kernel (vmlinux) maps once the runtime address of the
 * reference symbol is known: store the delta between the runtime and
 * unrelocated addresses in map->pgoff and install the reloc-aware
 * map_ip/unmap_ip hooks that apply it.
 */
void perf_session__reloc_vmlinux_maps(struct perf_session *self,
				      u64 unrelocated_addr)
{
	enum map_type type;
	s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

	if (!reloc)	/* kernel loaded at the expected address: nothing to do */
		return;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct map *map = self->vmlinux_maps[type];

		map->map_ip = map__reloc_map_ip;
		map->unmap_ip = map__reloc_unmap_ip;
		map->pgoff = reloc;	/* repurposed to carry the reloc delta */
	}
}
538