xref: /openbmc/linux/tools/perf/tests/code-reading.c (revision a06c488d)
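/*
 * Sanity test for perf's object code reading: record samples on a test
 * workload, read the sampled instruction bytes back through perf's own
 * dso/map layer, and compare them with the bytes that objdump reports
 * for the same addresses.  A mismatch means perf is mapping addresses
 * to file offsets incorrectly.
 */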
#include <linux/types.h>
#include <inttypes.h>	/* PRIx64 */
#include <limits.h>	/* PATH_MAX */
#include <sys/param.h>	/* MIN() */
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#define BUFSZ	1024
#define READLEN	128

struct state {
	u64 done[1024];
	size_t done_cnt;
};

/* Convert one hex digit; the caller must ensure isxdigit(c) holds */
static unsigned int hex(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}

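/*
 * Parse the hex byte dump out of one line of objdump output.  As an
 * illustration (exact spacing varies between binutils versions), a
 * line such as:
 *
 *	4004f6:	55 48 89 e5	push   %rbp
 *
 * yields the bytes 0x55 0x48 0x89 0xe5.  Parsing stops at the first
 * token that is not a two-digit hex byte followed by whitespace, so
 * the mnemonic text is never consumed.
 */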
static size_t read_objdump_line(const char *line, size_t line_len, void *buf,
				size_t len)
{
	const char *p;
	size_t i, j = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	i = p + 1 - line;

	/* Read bytes */
	while (j < len) {
		char c1, c2;

		/* Skip spaces */
		for (; i < line_len; i++) {
			if (!isspace(line[i]))
				break;
		}
		/* Get 2 hex digits */
		if (i >= line_len || !isxdigit(line[i]))
			break;
		c1 = line[i++];
		if (i >= line_len || !isxdigit(line[i]))
			break;
		c2 = line[i++];
		/* Followed by a space */
		if (i < line_len && line[i] && !isspace(line[i]))
			break;
		/* Store byte */
		*(unsigned char *)buf = (hex(c1) << 4) | hex(c2);
		buf += 1;
		j++;
	}
	/* Return the number of successfully read bytes */
	return j;
}

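/*
 * Collect up to *len bytes of objdump output into 'buf', using the
 * address at the start of each dump line to place the bytes at the
 * right offset from start_addr.  On return, *len holds the number of
 * bytes that could not be read.
 */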
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* Read the objdump data into a temporary buffer */
		read_bytes = read_objdump_line(line, ret, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/*
		 * Copy it from the temporary buffer to 'buf' according to
		 * the address on the current objdump line.
		 */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* *len returns the number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}

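/*
 * Read 'len' bytes at 'addr' from 'filename' by running objdump and
 * parsing its output.  The command built below looks roughly like
 * (illustrative):
 *
 *	objdump -z -d --start-address=0x400500 --stop-address=0x400580 \
 *		/path/to/binary 2>/dev/null
 *
 * Returns 0 on success, a positive count of unread bytes if objdump
 * produced too little output, or -1 on error.
 */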
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
		       filename);
	/* Leave room for the " 2>/dev/null" appended below */
	if (ret <= 0 || (size_t)ret >= sizeof(cmd) - strlen(" 2>/dev/null"))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes\n");
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

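/* Hex-dump 'len' bytes to the debug log, 16 bytes per line */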
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}

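/*
 * The core of the test: resolve 'addr' to a map and DSO, read up to
 * 'len' bytes of object code at that address via perf's own
 * dso__data_read_offset(), read the same bytes via objdump, and fail
 * if the two disagree.
 */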
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ];
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
	if (!al.map || !al.map->dso) {
		pr_debug("thread__find_addr_map failed\n");
		return -1;
	}

	pr_debug("File is: %s\n", al.map->dso->long_name);

	if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(al.map->dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		return 0;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > al.map->end)
		len = al.map->end - addr;

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		return -1;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that.  See map__rip_2objdump() for details.
	 */
	if (map__load(al.map, NULL))
		return -1;

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(al.map->dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == al.map->start) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				return 0;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			return 0;
		}
		state->done[state->done_cnt++] = al.map->start;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(al.map->dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
				return 0;
			} else {
				return -1;
			}
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		return -1;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		return -1;
	}
	pr_debug("Bytes read match those read by objdump\n");

	return 0;
}

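/* Parse one sample event and check the object code at its IP */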
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	u8 cpumode;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
	thread__put(thread);
	return ret;
}

static int process_event(struct machine *machine, struct perf_evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

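/* Drain all mmap'd ring buffers, handing each event to process_event() */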
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_evlist__mmap_consume(evlist, i);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

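/*
 * The functions below are the test workload.  They mix userspace
 * computation (sorting), syscalls and filesystem activity so that the
 * recorded samples land in a variety of maps: the test binary itself,
 * libc and the kernel.
 */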
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

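/* Generate kernel-side samples by repeatedly creating and closing pipes */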
static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

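/* Generate filesystem activity by repeatedly creating and removing a file */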
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

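/*
 * Outcomes of one test run.  Everything except an outright failure is
 * reported as success by test__code_reading(), since a missing vmlinux,
 * kcore or permission is an environment limitation, not a perf bug.
 */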
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

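/*
 * Run the test once: set up a machine and kernel maps (optionally
 * forcing kallsyms/kcore), open a cycles event on this process, run
 * the workload, then verify the object code for every sampled IP.
 * If opening a kernel-and-user event fails (e.g. a restrictive
 * perf_event_paranoid setting), retry with a user-space-only event.
 */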
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();
	if (!machine) {
		pr_debug("machine__new_host failed\n");
		return -1;
	}

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore) {
		err = TEST_CODE_READING_NO_KCORE;
		goto out_err;
	}

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine, false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by
				 * evlist and will be freed by the following
				 * perf_evlist__set_maps call.  Get a
				 * reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

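/*
 * Entry point: run the test against vmlinux first, then a second time
 * forcing kcore.  Missing prerequisites are logged but do not fail the
 * test.
 */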
int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}
635