xref: /openbmc/linux/tools/perf/util/session.c (revision a89e5abe)
1 #include <linux/kernel.h>
2 
3 #include <unistd.h>
4 #include <sys/types.h>
5 
6 #include "session.h"
7 #include "sort.h"
8 #include "util.h"
9 
10 static int perf_session__open(struct perf_session *self, bool force)
11 {
12 	struct stat input_stat;
13 
14 	self->fd = open(self->filename, O_RDONLY);
15 	if (self->fd < 0) {
16 		pr_err("failed to open file: %s", self->filename);
17 		if (!strcmp(self->filename, "perf.data"))
18 			pr_err("  (try 'perf record' first)");
19 		pr_err("\n");
20 		return -errno;
21 	}
22 
23 	if (fstat(self->fd, &input_stat) < 0)
24 		goto out_close;
25 
26 	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
27 		pr_err("file %s not owned by current user or root\n",
28 		       self->filename);
29 		goto out_close;
30 	}
31 
32 	if (!input_stat.st_size) {
33 		pr_info("zero-sized file (%s), nothing to do!\n",
34 			self->filename);
35 		goto out_close;
36 	}
37 
38 	if (perf_header__read(&self->header, self->fd) < 0) {
39 		pr_err("incompatible file format");
40 		goto out_close;
41 	}
42 
43 	self->size = input_stat.st_size;
44 	return 0;
45 
46 out_close:
47 	close(self->fd);
48 	self->fd = -1;
49 	return -1;
50 }
51 
52 struct perf_session *perf_session__new(const char *filename, int mode, bool force)
53 {
54 	size_t len = filename ? strlen(filename) + 1 : 0;
55 	struct perf_session *self = zalloc(sizeof(*self) + len);
56 
57 	if (self == NULL)
58 		goto out;
59 
60 	if (perf_header__init(&self->header) < 0)
61 		goto out_free;
62 
63 	memcpy(self->filename, filename, len);
64 	self->threads = RB_ROOT;
65 	self->last_match = NULL;
66 	self->mmap_window = 32;
67 	self->cwd = NULL;
68 	self->cwdlen = 0;
69 	self->unknown_events = 0;
70 	map_groups__init(&self->kmaps);
71 
72 	if (perf_session__create_kernel_maps(self) < 0)
73 		goto out_delete;
74 
75 	if (mode == O_RDONLY && perf_session__open(self, force) < 0)
76 		goto out_delete;
77 
78 	self->sample_type = perf_header__sample_type(&self->header);
79 out:
80 	return self;
81 out_free:
82 	free(self);
83 	return NULL;
84 out_delete:
85 	perf_session__delete(self);
86 	return NULL;
87 }
88 
89 void perf_session__delete(struct perf_session *self)
90 {
91 	perf_header__exit(&self->header);
92 	close(self->fd);
93 	free(self->cwd);
94 	free(self);
95 }
96 
97 static bool symbol__match_parent_regex(struct symbol *sym)
98 {
99 	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
100 		return 1;
101 
102 	return 0;
103 }
104 
/*
 * Walk @chain resolving each address to a symbol.
 *
 * When callchains are enabled, returns a calloc()ed array of chain->nr
 * symbol pointers (unresolved entries stay NULL) that the caller must
 * free; otherwise returns NULL and the walk only serves to find the
 * parent symbol.  The first symbol matching the parent regex is stored
 * through @parent as a side effect.
 */
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	/* Last context marker seen; following IPs inherit it. */
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			/* NOTE(review): exit() on OOM in library code is harsh;
			 * consider propagating the failure to the caller. */
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		/* Entries >= PERF_CONTEXT_MAX are context markers, not IPs. */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			/*
			 * Without callchains the walk only looks for the
			 * parent, so stop at the first resolved symbol.
			 */
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}
154 
/*
 * Default callback installed by perf_event_ops__fill_defaults() for any
 * handler the tool left NULL: log that the event is unhandled and report
 * success so stream processing continues.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
161 
162 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
163 {
164 	if (handler->sample == NULL)
165 		handler->sample = process_event_stub;
166 	if (handler->mmap == NULL)
167 		handler->mmap = process_event_stub;
168 	if (handler->comm == NULL)
169 		handler->comm = process_event_stub;
170 	if (handler->fork == NULL)
171 		handler->fork = process_event_stub;
172 	if (handler->exit == NULL)
173 		handler->exit = process_event_stub;
174 	if (handler->lost == NULL)
175 		handler->lost = process_event_stub;
176 	if (handler->read == NULL)
177 		handler->read = process_event_stub;
178 	if (handler->throttle == NULL)
179 		handler->throttle = process_event_stub;
180 	if (handler->unthrottle == NULL)
181 		handler->unthrottle = process_event_stub;
182 }
183 
/*
 * Human-readable names for the record types, indexed by PERF_RECORD_*;
 * slot 0 is used for the grand total in event__print_totals().
 */
static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};
196 
197 unsigned long event__total[PERF_RECORD_MAX];
198 
199 void event__print_totals(void)
200 {
201 	int i;
202 	for (i = 0; i < PERF_RECORD_MAX; ++i)
203 		pr_info("%10s events: %10ld\n",
204 			event__name[i], event__total[i]);
205 }
206 
/*
 * Account @event in the global totals and dispatch it to the ops
 * callback matching its header type.  @offset/@head locate the record in
 * the file (for the dump output only).
 *
 * Returns the callback's result, or -1 for unknown types (also counted
 * in self->unknown_events) so the caller can try to resynchronize.
 */
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       unsigned long offset, unsigned long head)
{
	trace_event(event);

	/* Bump the per-type and grand-total counters for known types. */
	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%p [%p]: PERF_RECORD_%s",
			    (void *)(offset + head),
			    (void *)(long)(event->header.size),
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}
247 
248 int perf_header__read_build_ids(int input, u64 offset, u64 size)
249 {
250 	struct build_id_event bev;
251 	char filename[PATH_MAX];
252 	u64 limit = offset + size;
253 	int err = -1;
254 
255 	while (offset < limit) {
256 		struct dso *dso;
257 		ssize_t len;
258 		struct list_head *head = &dsos__user;
259 
260 		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
261 			goto out;
262 
263 		len = bev.header.size - sizeof(bev);
264 		if (read(input, filename, len) != len)
265 			goto out;
266 
267 		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
268 			head = &dsos__kernel;
269 
270 		dso = __dsos__findnew(head, filename);
271 		if (dso != NULL)
272 			dso__set_build_id(dso, &bev.build_id);
273 
274 		offset += bev.header.size;
275 	}
276 	err = 0;
277 out:
278 	return err;
279 }
280 
281 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
282 {
283 	struct thread *thread = perf_session__findnew(self, 0);
284 
285 	if (thread == NULL || thread__set_comm(thread, "swapper")) {
286 		pr_err("problem inserting idle task.\n");
287 		thread = NULL;
288 	}
289 
290 	return thread;
291 }
292 
/*
 * Main event loop: mmap() a sliding, page-aligned window of the data
 * file and hand each record to the matching callback in @ops.
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;
	unsigned long head, shift;
	unsigned long offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	/* pid 0 never forks, so it has to be registered by hand. */
	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	/* Any callback the caller left NULL becomes a logging no-op. */
	perf_event_ops__fill_defaults(ops);

	page_size = getpagesize();

	/* Event records start right after the file header. */
	head = self->header.data_offset;

	/* Cache the cwd so sample paths can be shortened relative to it. */
	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:		/* also entered from the strdup() failure below */
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	/* Align the mmap offset down to a page boundary. */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
		   MAP_SHARED, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (size == 0)
		size = 8;

	/*
	 * If this record would straddle the end of the mapped window,
	 * slide the window forward (page aligned) and map again.
	 */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	/*
	 * NOTE(review): this reassignment discards the size==0 -> 8 fixup
	 * done above; the zero case is handled again in the error branch
	 * below, so behavior is unchanged, but the earlier fixup is dead.
	 */
	size = event->header.size;

	dump_printf("\n%p [%p]: event: %d\n",
			(void *)(offset + head),
			(void *)(long)event->header.size,
			event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%p [%p]: skipping unknown header type: %d\n",
			    (void *)(offset + head),
			    (void *)(long)(event->header.size),
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	/* Stop at the end of the data section recorded in the header... */
	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	/* ...or at the end of the file, whichever comes first. */
	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}
398 
399 bool perf_session__has_traces(struct perf_session *self, const char *msg)
400 {
401 	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
402 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
403 		return false;
404 	}
405 
406 	return true;
407 }
408 
409 int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
410 					     const char *symbol_name,
411 					     u64 addr)
412 {
413 	char *bracket;
414 
415 	self->ref_reloc_sym.name = strdup(symbol_name);
416 	if (self->ref_reloc_sym.name == NULL)
417 		return -ENOMEM;
418 
419 	bracket = strchr(self->ref_reloc_sym.name, ']');
420 	if (bracket)
421 		*bracket = '\0';
422 
423 	self->ref_reloc_sym.addr = addr;
424 	return 0;
425 }
426 
/*
 * map_ip replacement installed by perf_session__reloc_vmlinux_maps():
 * maps a file address to a runtime address by adding the relocation
 * delta stashed (as a signed value) in map->pgoff.
 */
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}
431 
/*
 * Inverse of map__reloc_map_ip(): maps a runtime address back to a file
 * address by subtracting the relocation delta stashed in map->pgoff.
 */
static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}
436 
/*
 * Fix up the vmlinux maps when the running kernel was relocated:
 * @unrelocated_addr is the reference symbol's address in the vmlinux
 * image, compared against the runtime address recorded in
 * ref_reloc_sym.addr.  A nonzero difference installs translating
 * map_ip/unmap_ip hooks on every vmlinux map.
 */
void perf_session__reloc_vmlinux_maps(struct perf_session *self,
				      u64 unrelocated_addr)
{
	enum map_type type;
	s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

	/* No relocation happened; the identity mapping stays in place. */
	if (!reloc)
		return;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct map *map = self->vmlinux_maps[type];

		/* pgoff is repurposed to carry the relocation delta. */
		map->map_ip = map__reloc_map_ip;
		map->unmap_ip = map__reloc_unmap_ip;
		map->pgoff = reloc;
	}
}
454