xref: /openbmc/linux/tools/perf/util/annotate.c (revision 68198dca)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-annotate.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <errno.h>
11 #include <inttypes.h>
12 #include "util.h"
13 #include "ui/ui.h"
14 #include "sort.h"
15 #include "build-id.h"
16 #include "color.h"
17 #include "cache.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "annotate.h"
21 #include "evsel.h"
22 #include "block-range.h"
23 #include "string2.h"
24 #include "arch/common.h"
25 #include <regex.h>
26 #include <pthread.h>
27 #include <linux/bitops.h>
28 #include <linux/kernel.h>
29 #include <sys/utsname.h>
30 
31 #include "sane_ctype.h"
32 
33 const char 	*disassembler_style;
34 const char	*objdump_path;
35 static regex_t	 file_lineno;
36 
37 static struct ins_ops *ins__find(struct arch *arch, const char *name);
38 static void ins__sort(struct arch *arch);
39 static int disasm_line__parse(char *line, const char **namep, char **rawp);
40 
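/*
 * Per-arch state for annotation: a (lazily sorted) table of known
 * instructions, an optional hook to recognize instructions not in that
 * table, a one-shot init routine, and a couple of objdump output quirks
 * (comment character, character marking functions to be skipped).
 */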
41 struct arch {
42 	const char	*name;
43 	struct ins	*instructions;
44 	size_t		nr_instructions;
45 	size_t		nr_instructions_allocated;
46 	struct ins_ops  *(*associate_instruction_ops)(struct arch *arch, const char *name);
47 	bool		sorted_instructions;
48 	bool		initialized;
49 	void		*priv;
50 	unsigned int	model;
51 	unsigned int	family;
52 	int		(*init)(struct arch *arch, char *cpuid);
53 	bool		(*ins_is_fused)(struct arch *arch, const char *ins1,
54 					const char *ins2);
55 	struct		{
56 		char comment_char;
57 		char skip_functions_char;
58 	} objdump;
59 };
60 
61 static struct ins_ops call_ops;
62 static struct ins_ops dec_ops;
63 static struct ins_ops jump_ops;
64 static struct ins_ops mov_ops;
65 static struct ins_ops nop_ops;
66 static struct ins_ops lock_ops;
67 static struct ins_ops ret_ops;
68 
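/*
 * Grow the instruction table in chunks of 128 entries. If the table still
 * points at the arch's static array (nothing allocated yet), switch to a
 * heap-allocated copy first so new entries can be appended to it.
 */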
69 static int arch__grow_instructions(struct arch *arch)
70 {
71 	struct ins *new_instructions;
72 	size_t new_nr_allocated;
73 
74 	if (arch->nr_instructions_allocated == 0 && arch->instructions)
75 		goto grow_from_non_allocated_table;
76 
77 	new_nr_allocated = arch->nr_instructions_allocated + 128;
78 	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
79 	if (new_instructions == NULL)
80 		return -1;
81 
82 out_update_instructions:
83 	arch->instructions = new_instructions;
84 	arch->nr_instructions_allocated = new_nr_allocated;
85 	return 0;
86 
87 grow_from_non_allocated_table:
88 	new_nr_allocated = arch->nr_instructions + 128;
89 	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
90 	if (new_instructions == NULL)
91 		return -1;
92 
93 	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
94 	goto out_update_instructions;
95 }
96 
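/*
 * Append a newly recognized instruction to the arch table and re-sort it,
 * so that subsequent bsearch() lookups in __ins__find() can find it.
 */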
97 static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
98 {
99 	struct ins *ins;
100 
101 	if (arch->nr_instructions == arch->nr_instructions_allocated &&
102 	    arch__grow_instructions(arch))
103 		return -1;
104 
105 	ins = &arch->instructions[arch->nr_instructions];
106 	ins->name = strdup(name);
107 	if (!ins->name)
108 		return -1;
109 
110 	ins->ops  = ops;
111 	arch->nr_instructions++;
112 
113 	ins__sort(arch);
114 	return 0;
115 }
116 
117 #include "arch/arm/annotate/instructions.c"
118 #include "arch/arm64/annotate/instructions.c"
119 #include "arch/x86/annotate/instructions.c"
120 #include "arch/powerpc/annotate/instructions.c"
121 #include "arch/s390/annotate/instructions.c"
122 
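/*
 * Table of supported architectures, sorted lazily on first use by
 * arch__find(). Entries without a static .instructions table rely on their
 * init routine and/or the associate_instruction_ops hook to recognize
 * instructions as they are parsed.
 */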
123 static struct arch architectures[] = {
124 	{
125 		.name = "arm",
126 		.init = arm__annotate_init,
127 	},
128 	{
129 		.name = "arm64",
130 		.init = arm64__annotate_init,
131 	},
132 	{
133 		.name = "x86",
134 		.init = x86__annotate_init,
135 		.instructions = x86__instructions,
136 		.nr_instructions = ARRAY_SIZE(x86__instructions),
137 		.ins_is_fused = x86__ins_is_fused,
138 		.objdump =  {
139 			.comment_char = '#',
140 		},
141 	},
142 	{
143 		.name = "powerpc",
144 		.init = powerpc__annotate_init,
145 	},
146 	{
147 		.name = "s390",
148 		.init = s390__annotate_init,
149 		.objdump =  {
150 			.comment_char = '#',
151 		},
152 	},
153 };
154 
155 static void ins__delete(struct ins_operands *ops)
156 {
157 	if (ops == NULL)
158 		return;
159 	zfree(&ops->source.raw);
160 	zfree(&ops->source.name);
161 	zfree(&ops->target.raw);
162 	zfree(&ops->target.name);
163 }
164 
165 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
166 			      struct ins_operands *ops)
167 {
168 	return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
169 }
170 
171 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
172 		  struct ins_operands *ops)
173 {
174 	if (ins->ops->scnprintf)
175 		return ins->ops->scnprintf(ins, bf, size, ops);
176 
177 	return ins__raw_scnprintf(ins, bf, size, ops);
178 }
179 
180 bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
181 {
182 	if (!arch || !arch->ins_is_fused)
183 		return false;
184 
185 	return arch->ins_is_fused(arch, ins1, ins2);
186 }
187 
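/*
 * Parse the operands of a call instruction as emitted by objdump. A direct
 * call looks like (illustrative) "callq  416e70 <__gmon_start__@plt>": the
 * target address is parsed and the name between <> is saved. Indirect calls
 * (no <symbol>) are resolved via the map when possible; otherwise
 * call__scnprintf() falls back to the raw operands or a "*address" form.
 */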
188 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
189 {
190 	char *endptr, *tok, *name;
191 
192 	ops->target.addr = strtoull(ops->raw, &endptr, 16);
193 
194 	name = strchr(endptr, '<');
195 	if (name == NULL)
196 		goto indirect_call;
197 
198 	name++;
199 
200 	if (arch->objdump.skip_functions_char &&
201 	    strchr(name, arch->objdump.skip_functions_char))
202 		return -1;
203 
204 	tok = strchr(name, '>');
205 	if (tok == NULL)
206 		return -1;
207 
208 	*tok = '\0';
209 	ops->target.name = strdup(name);
210 	*tok = '>';
211 
212 	return ops->target.name == NULL ? -1 : 0;
213 
214 indirect_call:
215 	tok = strchr(endptr, '*');
216 	if (tok == NULL) {
217 		struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
218 		if (sym != NULL)
219 			ops->target.name = strdup(sym->name);
220 		else
221 			ops->target.addr = 0;
222 		return 0;
223 	}
224 
225 	ops->target.addr = strtoull(tok + 1, NULL, 16);
226 	return 0;
227 }
228 
229 static int call__scnprintf(struct ins *ins, char *bf, size_t size,
230 			   struct ins_operands *ops)
231 {
232 	if (ops->target.name)
233 		return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
234 
235 	if (ops->target.addr == 0)
236 		return ins__raw_scnprintf(ins, bf, size, ops);
237 
238 	return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
239 }
240 
241 static struct ins_ops call_ops = {
242 	.parse	   = call__parse,
243 	.scnprintf = call__scnprintf,
244 };
245 
246 bool ins__is_call(const struct ins *ins)
247 {
248 	return ins->ops == &call_ops;
249 }
250 
251 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
252 {
253 	const char *s = strchr(ops->raw, '+');
254 	const char *c = strchr(ops->raw, ',');
255 
256 	/*
257 	 * skip over up to 2 possible operands to get to the address, e.g.:
258 	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
259 	 */
260 	if (c++ != NULL) {
261 		ops->target.addr = strtoull(c, NULL, 16);
262 		if (!ops->target.addr) {
263 			c = strchr(c, ',');
264 			if (c++ != NULL)
265 				ops->target.addr = strtoull(c, NULL, 16);
266 		}
267 	} else {
268 		ops->target.addr = strtoull(ops->raw, NULL, 16);
269 	}
270 
271 	if (s++ != NULL) {
272 		ops->target.offset = strtoull(s, NULL, 16);
273 		ops->target.offset_avail = true;
274 	} else {
275 		ops->target.offset_avail = false;
276 	}
277 
278 	return 0;
279 }
280 
281 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
282 			   struct ins_operands *ops)
283 {
284 	const char *c = strchr(ops->raw, ',');
285 
286 	if (!ops->target.addr || ops->target.offset < 0)
287 		return ins__raw_scnprintf(ins, bf, size, ops);
288 
289 	if (c != NULL) {
290 		const char *c2 = strchr(c + 1, ',');
291 
292 		/* check for 3-op insn */
293 		if (c2 != NULL)
294 			c = c2;
295 		c++;
296 
297 		/* mirror arch objdump's space-after-comma style */
298 		if (*c == ' ')
299 			c++;
300 	}
301 
302 	return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
303 			 ins->name, c ? c - ops->raw : 0, ops->raw,
304 			 ops->target.offset);
305 }
306 
307 static struct ins_ops jump_ops = {
308 	.parse	   = jump__parse,
309 	.scnprintf = jump__scnprintf,
310 };
311 
312 bool ins__is_jump(const struct ins *ins)
313 {
314 	return ins->ops == &jump_ops;
315 }
316 
317 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
318 {
319 	char *endptr, *name, *t;
320 
321 	if (strstr(raw, "(%rip)") == NULL)
322 		return 0;
323 
324 	*addrp = strtoull(comment, &endptr, 16);
325 	name = strchr(endptr, '<');
326 	if (name == NULL)
327 		return -1;
328 
329 	name++;
330 
331 	t = strchr(name, '>');
332 	if (t == NULL)
333 		return 0;
334 
335 	*t = '\0';
336 	*namep = strdup(name);
337 	*t = '>';
338 
339 	return 0;
340 }
341 
342 static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
343 {
344 	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
345 	if (ops->locked.ops == NULL)
346 		return 0;
347 
348 	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
349 		goto out_free_ops;
350 
351 	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
352 
353 	if (ops->locked.ins.ops == NULL)
354 		goto out_free_ops;
355 
356 	if (ops->locked.ins.ops->parse &&
357 	    ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
358 		goto out_free_ops;
359 
360 	return 0;
361 
362 out_free_ops:
363 	zfree(&ops->locked.ops);
364 	return 0;
365 }
366 
367 static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
368 			   struct ins_operands *ops)
369 {
370 	int printed;
371 
372 	if (ops->locked.ins.ops == NULL)
373 		return ins__raw_scnprintf(ins, bf, size, ops);
374 
375 	printed = scnprintf(bf, size, "%-6s ", ins->name);
376 	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
377 					size - printed, ops->locked.ops);
378 }
379 
380 static void lock__delete(struct ins_operands *ops)
381 {
382 	struct ins *ins = &ops->locked.ins;
383 
384 	if (ins->ops && ins->ops->free)
385 		ins->ops->free(ops->locked.ops);
386 	else
387 		ins__delete(ops->locked.ops);
388 
389 	zfree(&ops->locked.ops);
390 	zfree(&ops->target.raw);
391 	zfree(&ops->target.name);
392 }
393 
394 static struct ins_ops lock_ops = {
395 	.free	   = lock__delete,
396 	.parse	   = lock__parse,
397 	.scnprintf = lock__scnprintf,
398 };
399 
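/*
 * Split mov-like operands such as (illustrative, from objdump -dS output):
 *
 *   0x2f5ad5(%rip),%rax        # 70afe0 <_DYNAMIC+0x2f8>
 *
 * into source ("0x2f5ad5(%rip)") and target ("%rax"), then let
 * comment__symbol() resolve the "# address <name>" comment, if present,
 * into a symbol name for %rip-relative operands.
 */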
400 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
401 {
402 	char *s = strchr(ops->raw, ','), *target, *comment, prev;
403 
404 	if (s == NULL)
405 		return -1;
406 
407 	*s = '\0';
408 	ops->source.raw = strdup(ops->raw);
409 	*s = ',';
410 
411 	if (ops->source.raw == NULL)
412 		return -1;
413 
414 	target = ++s;
415 	comment = strchr(s, arch->objdump.comment_char);
416 
417 	if (comment != NULL)
418 		s = comment - 1;
419 	else
420 		s = strchr(s, '\0') - 1;
421 
422 	while (s > target && isspace(s[0]))
423 		--s;
424 	s++;
425 	prev = *s;
426 	*s = '\0';
427 
428 	ops->target.raw = strdup(target);
429 	*s = prev;
430 
431 	if (ops->target.raw == NULL)
432 		goto out_free_source;
433 
434 	if (comment == NULL)
435 		return 0;
436 
437 	comment = ltrim(comment);
438 	comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
439 	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
440 
441 	return 0;
442 
443 out_free_source:
444 	zfree(&ops->source.raw);
445 	return -1;
446 }
447 
448 static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
449 			   struct ins_operands *ops)
450 {
451 	return scnprintf(bf, size, "%-6s %s,%s", ins->name,
452 			 ops->source.name ?: ops->source.raw,
453 			 ops->target.name ?: ops->target.raw);
454 }
455 
456 static struct ins_ops mov_ops = {
457 	.parse	   = mov__parse,
458 	.scnprintf = mov__scnprintf,
459 };
460 
461 static int dec__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
462 {
463 	char *target, *comment, *s, prev;
464 
465 	target = s = ops->raw;
466 
467 	while (s[0] != '\0' && !isspace(s[0]))
468 		++s;
469 	prev = *s;
470 	*s = '\0';
471 
472 	ops->target.raw = strdup(target);
473 	*s = prev;
474 
475 	if (ops->target.raw == NULL)
476 		return -1;
477 
478 	comment = strchr(s, arch->objdump.comment_char);
479 	if (comment == NULL)
480 		return 0;
481 
482 	comment = ltrim(comment);
483 	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
484 
485 	return 0;
486 }
487 
488 static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
489 			   struct ins_operands *ops)
490 {
491 	return scnprintf(bf, size, "%-6s %s", ins->name,
492 			 ops->target.name ?: ops->target.raw);
493 }
494 
495 static struct ins_ops dec_ops = {
496 	.parse	   = dec__parse,
497 	.scnprintf = dec__scnprintf,
498 };
499 
500 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
501 			  struct ins_operands *ops __maybe_unused)
502 {
503 	return scnprintf(bf, size, "%-6s", "nop");
504 }
505 
506 static struct ins_ops nop_ops = {
507 	.scnprintf = nop__scnprintf,
508 };
509 
510 static struct ins_ops ret_ops = {
511 	.scnprintf = ins__raw_scnprintf,
512 };
513 
514 bool ins__is_ret(const struct ins *ins)
515 {
516 	return ins->ops == &ret_ops;
517 }
518 
519 bool ins__is_lock(const struct ins *ins)
520 {
521 	return ins->ops == &lock_ops;
522 }
523 
524 static int ins__key_cmp(const void *name, const void *insp)
525 {
526 	const struct ins *ins = insp;
527 
528 	return strcmp(name, ins->name);
529 }
530 
531 static int ins__cmp(const void *a, const void *b)
532 {
533 	const struct ins *ia = a;
534 	const struct ins *ib = b;
535 
536 	return strcmp(ia->name, ib->name);
537 }
538 
539 static void ins__sort(struct arch *arch)
540 {
541 	const int nmemb = arch->nr_instructions;
542 
543 	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
544 }
545 
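/*
 * Look an instruction up by name with bsearch(), sorting the arch table
 * first if needed. ins__find() below additionally gives the arch a chance
 * to recognize (and add) instructions missing from its static table.
 */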
546 static struct ins_ops *__ins__find(struct arch *arch, const char *name)
547 {
548 	struct ins *ins;
549 	const int nmemb = arch->nr_instructions;
550 
551 	if (!arch->sorted_instructions) {
552 		ins__sort(arch);
553 		arch->sorted_instructions = true;
554 	}
555 
556 	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
557 	return ins ? ins->ops : NULL;
558 }
559 
560 static struct ins_ops *ins__find(struct arch *arch, const char *name)
561 {
562 	struct ins_ops *ops = __ins__find(arch, name);
563 
564 	if (!ops && arch->associate_instruction_ops)
565 		ops = arch->associate_instruction_ops(arch, name);
566 
567 	return ops;
568 }
569 
570 static int arch__key_cmp(const void *name, const void *archp)
571 {
572 	const struct arch *arch = archp;
573 
574 	return strcmp(name, arch->name);
575 }
576 
577 static int arch__cmp(const void *a, const void *b)
578 {
579 	const struct arch *aa = a;
580 	const struct arch *ab = b;
581 
582 	return strcmp(aa->name, ab->name);
583 }
584 
585 static void arch__sort(void)
586 {
587 	const int nmemb = ARRAY_SIZE(architectures);
588 
589 	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
590 }
591 
592 static struct arch *arch__find(const char *name)
593 {
594 	const int nmemb = ARRAY_SIZE(architectures);
595 	static bool sorted;
596 
597 	if (!sorted) {
598 		arch__sort();
599 		sorted = true;
600 	}
601 
602 	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
603 }
604 
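/*
 * Allocate the per-symbol sample histograms: one struct sym_hist per event,
 * each followed by one sym_hist_entry per byte of the symbol, all laid out
 * contiguously right after *notes->src:
 *
 *   notes->src | hist[0] + entries | hist[1] + entries | ...
 *
 * annotation__histogram() then picks the per-event histogram out of this
 * area using sizeof_sym_hist.
 */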
605 int symbol__alloc_hist(struct symbol *sym)
606 {
607 	struct annotation *notes = symbol__annotation(sym);
608 	size_t size = symbol__size(sym);
609 	size_t sizeof_sym_hist;
610 
611 	/*
612 	 * Add a one-element buffer for zero-length symbols.
613 	 * When a sample is taken from the first instruction of a
614 	 * zero-length symbol, perf still resolves it, shows the
615 	 * symbol name in perf report and allows it to be
616 	 * annotated.
617 	 */
618 	if (size == 0)
619 		size = 1;
620 
621 	/* Check for overflow when calculating sizeof_sym_hist */
622 	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
623 		return -1;
624 
625 	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
626 
627 	/* Check for overflow in zalloc argument */
628 	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
629 				/ symbol_conf.nr_events)
630 		return -1;
631 
632 	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
633 	if (notes->src == NULL)
634 		return -1;
635 	notes->src->sizeof_sym_hist = sizeof_sym_hist;
636 	notes->src->nr_histograms   = symbol_conf.nr_events;
637 	INIT_LIST_HEAD(&notes->src->source);
638 	return 0;
639 }
640 
641 /* The cycles histogram is lazily allocated. */
642 static int symbol__alloc_hist_cycles(struct symbol *sym)
643 {
644 	struct annotation *notes = symbol__annotation(sym);
645 	const size_t size = symbol__size(sym);
646 
647 	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
648 	if (notes->src->cycles_hist == NULL)
649 		return -1;
650 	return 0;
651 }
652 
653 void symbol__annotate_zero_histograms(struct symbol *sym)
654 {
655 	struct annotation *notes = symbol__annotation(sym);
656 
657 	pthread_mutex_lock(&notes->lock);
658 	if (notes->src != NULL) {
659 		memset(notes->src->histograms, 0,
660 		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
661 		if (notes->src->cycles_hist)
662 			memset(notes->src->cycles_hist, 0,
663 				symbol__size(sym) * sizeof(struct cyc_hist));
664 	}
665 	pthread_mutex_unlock(&notes->lock);
666 }
667 
668 static int __symbol__account_cycles(struct annotation *notes,
669 				    u64 start,
670 				    unsigned offset, unsigned cycles,
671 				    unsigned have_start)
672 {
673 	struct cyc_hist *ch;
674 
675 	ch = notes->src->cycles_hist;
676 	/*
677 	 * For now we can only account one basic block per
678 	 * final jump. But multiple could be overlapping.
679 	 * Always account the longest one. So when
680 	 * a shorter one has already been seen, throw it away.
681 	 *
682 	 * We separately always account the full cycles.
683 	 */
684 	ch[offset].num_aggr++;
685 	ch[offset].cycles_aggr += cycles;
686 
687 	if (!have_start && ch[offset].have_start)
688 		return 0;
689 	if (ch[offset].num) {
690 		if (have_start && (!ch[offset].have_start ||
691 				   ch[offset].start > start)) {
692 			ch[offset].have_start = 0;
693 			ch[offset].cycles = 0;
694 			ch[offset].num = 0;
695 			if (ch[offset].reset < 0xffff)
696 				ch[offset].reset++;
697 		} else if (have_start &&
698 			   ch[offset].start < start)
699 			return 0;
700 	}
701 	ch[offset].have_start = have_start;
702 	ch[offset].start = start;
703 	ch[offset].cycles += cycles;
704 	ch[offset].num++;
705 	return 0;
706 }
707 
708 static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
709 				      struct annotation *notes, int evidx, u64 addr,
710 				      struct perf_sample *sample)
711 {
712 	unsigned offset;
713 	struct sym_hist *h;
714 
715 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
716 
717 	if ((addr < sym->start || addr >= sym->end) &&
718 	    (addr != sym->end || sym->start != sym->end)) {
719 		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
720 		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
721 		return -ERANGE;
722 	}
723 
724 	offset = addr - sym->start;
725 	h = annotation__histogram(notes, evidx);
726 	h->nr_samples++;
727 	h->addr[offset].nr_samples++;
728 	h->period += sample->period;
729 	h->addr[offset].period += sample->period;
730 
731 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
732 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
733 		  sym->start, sym->name, addr, addr - sym->start, evidx,
734 		  h->addr[offset].nr_samples, h->addr[offset].period);
735 	return 0;
736 }
737 
738 static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
739 {
740 	struct annotation *notes = symbol__annotation(sym);
741 
742 	if (notes->src == NULL) {
743 		if (symbol__alloc_hist(sym) < 0)
744 			return NULL;
745 	}
746 	if (!notes->src->cycles_hist && cycles) {
747 		if (symbol__alloc_hist_cycles(sym) < 0)
748 			return NULL;
749 	}
750 	return notes;
751 }
752 
753 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
754 				    int evidx, u64 addr,
755 				    struct perf_sample *sample)
756 {
757 	struct annotation *notes;
758 
759 	if (sym == NULL)
760 		return 0;
761 	notes = symbol__get_annotation(sym, false);
762 	if (notes == NULL)
763 		return -ENOMEM;
764 	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr, sample);
765 }
766 
767 static int symbol__account_cycles(u64 addr, u64 start,
768 				  struct symbol *sym, unsigned cycles)
769 {
770 	struct annotation *notes;
771 	unsigned offset;
772 
773 	if (sym == NULL)
774 		return 0;
775 	notes = symbol__get_annotation(sym, true);
776 	if (notes == NULL)
777 		return -ENOMEM;
778 	if (addr < sym->start || addr >= sym->end)
779 		return -ERANGE;
780 
781 	if (start) {
782 		if (start < sym->start || start >= sym->end)
783 			return -ERANGE;
784 		if (start >= addr)
785 			start = 0;
786 	}
787 	offset = addr - sym->start;
788 	return __symbol__account_cycles(notes,
789 					start ? start - sym->start : 0,
790 					offset, cycles,
791 					!!start);
792 }
793 
794 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
795 				    struct addr_map_symbol *start,
796 				    unsigned cycles)
797 {
798 	u64 saddr = 0;
799 	int err;
800 
801 	if (!cycles)
802 		return 0;
803 
804 	/*
805 	 * Only set start when IPC can be computed. We can only
806 	 * compute it when the basic block is completely in a single
807 	 * function.
808 	 * Special-case jumps that come from elsewhere but whose
809 	 * block starts right at the function start.
810 	 */
811 	if (start &&
812 		(start->sym == ams->sym ||
813 		 (ams->sym &&
814 		   start->addr == ams->sym->start + ams->map->start)))
815 		saddr = start->al_addr;
816 	if (saddr == 0)
817 		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
818 			ams->addr,
819 			start ? start->addr : 0,
820 			ams->sym ? ams->sym->start + ams->map->start : 0,
821 			saddr);
822 	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
823 	if (err)
824 		pr_debug2("account_cycles failed %d\n", err);
825 	return err;
826 }
827 
828 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
829 				 int evidx)
830 {
831 	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr, sample);
832 }
833 
834 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
835 				 int evidx, u64 ip)
836 {
837 	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample);
838 }
839 
840 static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
841 {
842 	dl->ins.ops = ins__find(arch, dl->ins.name);
843 
844 	if (!dl->ins.ops)
845 		return;
846 
847 	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
848 		dl->ins.ops = NULL;
849 }
850 
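/*
 * Split an objdump instruction line into its mnemonic and raw operands,
 * e.g. (illustrative) "  sub    $0x8,%rsp" becomes *namep = "sub" and
 * *rawp = "$0x8,%rsp".
 */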
851 static int disasm_line__parse(char *line, const char **namep, char **rawp)
852 {
853 	char tmp, *name = ltrim(line);
854 
855 	if (name[0] == '\0')
856 		return -1;
857 
858 	*rawp = name + 1;
859 
860 	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
861 		++*rawp;
862 
863 	tmp = (*rawp)[0];
864 	(*rawp)[0] = '\0';
865 	*namep = strdup(name);
866 
867 	if (*namep == NULL)
868 		goto out_free_name;
869 
870 	(*rawp)[0] = tmp;
871 	*rawp = ltrim(*rawp);
872 
873 	return 0;
874 
875 out_free_name:
876 	free((void *)*namep);
877 	*namep = NULL;
878 	return -1;
879 }
880 
881 static struct disasm_line *disasm_line__new(s64 offset, char *line,
882 					    size_t privsize, int line_nr,
883 					    struct arch *arch,
884 					    struct map *map)
885 {
886 	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
887 
888 	if (dl != NULL) {
889 		dl->offset = offset;
890 		dl->line = strdup(line);
891 		dl->line_nr = line_nr;
892 		if (dl->line == NULL)
893 			goto out_delete;
894 
895 		if (offset != -1) {
896 			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
897 				goto out_free_line;
898 
899 			disasm_line__init_ins(dl, arch, map);
900 		}
901 	}
902 
903 	return dl;
904 
905 out_free_line:
906 	zfree(&dl->line);
907 out_delete:
908 	free(dl);
909 	return NULL;
910 }
911 
912 void disasm_line__free(struct disasm_line *dl)
913 {
914 	zfree(&dl->line);
915 	if (dl->ins.ops && dl->ins.ops->free)
916 		dl->ins.ops->free(&dl->ops);
917 	else
918 		ins__delete(&dl->ops);
919 	free((void *)dl->ins.name);
920 	dl->ins.name = NULL;
921 	free(dl);
922 }
923 
924 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
925 {
926 	if (raw || !dl->ins.ops)
927 		return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
928 
929 	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
930 }
931 
932 static void disasm__add(struct list_head *head, struct disasm_line *line)
933 {
934 	list_add_tail(&line->node, head);
935 }
936 
937 struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
938 {
939 	list_for_each_entry_continue(pos, head, node)
940 		if (pos->offset >= 0)
941 			return pos;
942 
943 	return NULL;
944 }
945 
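/*
 * Note that notes->src->lines entries are variable sized: each carries
 * src_line->nr_pcnt samples (one per event in a group), so the array is
 * walked with an explicit byte stride (sizeof_src_line) rather than by
 * plain indexing.
 */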
946 double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
947 			    s64 end, const char **path, struct sym_hist_entry *sample)
948 {
949 	struct source_line *src_line = notes->src->lines;
950 	double percent = 0.0;
951 
952 	sample->nr_samples = sample->period = 0;
953 
954 	if (src_line) {
955 		size_t sizeof_src_line = sizeof(*src_line) +
956 				sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
957 
958 		while (offset < end) {
959 			src_line = (void *)notes->src->lines +
960 					(sizeof_src_line * offset);
961 
962 			if (*path == NULL)
963 				*path = src_line->path;
964 
965 			percent += src_line->samples[evidx].percent;
966 			sample->nr_samples += src_line->samples[evidx].nr;
967 			offset++;
968 		}
969 	} else {
970 		struct sym_hist *h = annotation__histogram(notes, evidx);
971 		unsigned int hits = 0;
972 		u64 period = 0;
973 
974 		while (offset < end) {
975 			hits   += h->addr[offset].nr_samples;
976 			period += h->addr[offset].period;
977 			++offset;
978 		}
979 
980 		if (h->nr_samples) {
981 			sample->period	   = period;
982 			sample->nr_samples = hits;
983 			percent = 100.0 * hits / h->nr_samples;
984 		}
985 	}
986 
987 	return percent;
988 }
989 
990 static const char *annotate__address_color(struct block_range *br)
991 {
992 	double cov = block_range__coverage(br);
993 
994 	if (cov >= 0) {
995 		/* mark red for >75% coverage */
996 		if (cov > 0.75)
997 			return PERF_COLOR_RED;
998 
999 		/* mark dull for <1% coverage */
1000 		if (cov < 0.01)
1001 			return PERF_COLOR_NORMAL;
1002 	}
1003 
1004 	return PERF_COLOR_MAGENTA;
1005 }
1006 
1007 static const char *annotate__asm_color(struct block_range *br)
1008 {
1009 	double cov = block_range__coverage(br);
1010 
1011 	if (cov >= 0) {
1012 		/* mark dull for <1% coverage */
1013 		if (cov < 0.01)
1014 			return PERF_COLOR_NORMAL;
1015 	}
1016 
1017 	return PERF_COLOR_BLUE;
1018 }
1019 
1020 static void annotate__branch_printf(struct block_range *br, u64 addr)
1021 {
1022 	bool emit_comment = true;
1023 
1024 	if (!br)
1025 		return;
1026 
1027 #if 1
1028 	if (br->is_target && br->start == addr) {
1029 		struct block_range *branch = br;
1030 		double p;
1031 
1032 		/*
1033 		 * Find matching branch to our target.
1034 		 */
1035 		while (!branch->is_branch)
1036 			branch = block_range__next(branch);
1037 
1038 		p = 100 *(double)br->entry / branch->coverage;
1039 
1040 		if (p > 0.1) {
1041 			if (emit_comment) {
1042 				emit_comment = false;
1043 				printf("\t#");
1044 			}
1045 
1046 			/*
1047 			 * The percentage of coverage joined at this target in relation
1048 			 * to the next branch.
1049 			 */
1050 			printf(" +%.2f%%", p);
1051 		}
1052 	}
1053 #endif
1054 	if (br->is_branch && br->end == addr) {
1055 		double p = 100*(double)br->taken / br->coverage;
1056 
1057 		if (p > 0.1) {
1058 			if (emit_comment) {
1059 				emit_comment = false;
1060 				printf("\t#");
1061 			}
1062 
1063 			/*
1064 			 * The percentage of coverage leaving at this branch, and
1065 			 * its prediction ratio.
1066 			 */
1067 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
1068 		}
1069 	}
1070 }
1071 
1072 
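/*
 * Print one disassembly line together with its per-event percentages (or
 * periods/sample counts). Returns 0 when the line was printed, 1 when it
 * was filtered out by max_lines, and -1 when it was filtered out by
 * min_pcnt or, while queueing context lines, because it is not an
 * instruction line.
 */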
1073 static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
1074 		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
1075 		      int max_lines, struct disasm_line *queue)
1076 {
1077 	static const char *prev_line;
1078 	static const char *prev_color;
1079 
1080 	if (dl->offset != -1) {
1081 		const char *path = NULL;
1082 		double percent, max_percent = 0.0;
1083 		double *ppercents = &percent;
1084 		struct sym_hist_entry sample;
1085 		struct sym_hist_entry *psamples = &sample;
1086 		int i, nr_percent = 1;
1087 		const char *color;
1088 		struct annotation *notes = symbol__annotation(sym);
1089 		s64 offset = dl->offset;
1090 		const u64 addr = start + offset;
1091 		struct disasm_line *next;
1092 		struct block_range *br;
1093 
1094 		next = disasm__get_next_ip_line(&notes->src->source, dl);
1095 
1096 		if (perf_evsel__is_group_event(evsel)) {
1097 			nr_percent = evsel->nr_members;
1098 			ppercents = calloc(nr_percent, sizeof(double));
1099 			psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
1100 			if (ppercents == NULL || psamples == NULL) {
1101 				return -1;
1102 			}
1103 		}
1104 
1105 		for (i = 0; i < nr_percent; i++) {
1106 			percent = disasm__calc_percent(notes,
1107 					notes->src->lines ? i : evsel->idx + i,
1108 					offset,
1109 					next ? next->offset : (s64) len,
1110 					&path, &sample);
1111 
1112 			ppercents[i] = percent;
1113 			psamples[i] = sample;
1114 			if (percent > max_percent)
1115 				max_percent = percent;
1116 		}
1117 
1118 		if (max_percent < min_pcnt)
1119 			return -1;
1120 
1121 		if (max_lines && printed >= max_lines)
1122 			return 1;
1123 
1124 		if (queue != NULL) {
1125 			list_for_each_entry_from(queue, &notes->src->source, node) {
1126 				if (queue == dl)
1127 					break;
1128 				disasm_line__print(queue, sym, start, evsel, len,
1129 						    0, 0, 1, NULL);
1130 			}
1131 		}
1132 
1133 		color = get_percent_color(max_percent);
1134 
1135 		/*
1136 		 * Also color the filename and line if needed, with
1137 		 * the same color as the percentage. Don't print it
1138 		 * twice for nearby colored addresses with the same filename:line.
1139 		 */
1140 		if (path) {
1141 			if (!prev_line || strcmp(prev_line, path)
1142 				       || color != prev_color) {
1143 				color_fprintf(stdout, color, " %s", path);
1144 				prev_line = path;
1145 				prev_color = color;
1146 			}
1147 		}
1148 
1149 		for (i = 0; i < nr_percent; i++) {
1150 			percent = ppercents[i];
1151 			sample = psamples[i];
1152 			color = get_percent_color(percent);
1153 
1154 			if (symbol_conf.show_total_period)
1155 				color_fprintf(stdout, color, " %11" PRIu64,
1156 					      sample.period);
1157 			else if (symbol_conf.show_nr_samples)
1158 				color_fprintf(stdout, color, " %7" PRIu64,
1159 					      sample.nr_samples);
1160 			else
1161 				color_fprintf(stdout, color, " %7.2f", percent);
1162 		}
1163 
1164 		printf(" :	");
1165 
1166 		br = block_range__find(addr);
1167 		color_fprintf(stdout, annotate__address_color(br), "  %" PRIx64 ":", addr);
1168 		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
1169 		annotate__branch_printf(br, addr);
1170 		printf("\n");
1171 
1172 		if (ppercents != &percent)
1173 			free(ppercents);
1174 
1175 		if (psamples != &sample)
1176 			free(psamples);
1177 
1178 	} else if (max_lines && printed >= max_lines)
1179 		return 1;
1180 	else {
1181 		int width = symbol_conf.show_total_period ? 12 : 8;
1182 
1183 		if (queue)
1184 			return -1;
1185 
1186 		if (perf_evsel__is_group_event(evsel))
1187 			width *= evsel->nr_members;
1188 
1189 		if (!*dl->line)
1190 			printf(" %*s:\n", width, " ");
1191 		else
1192 			printf(" %*s:	%s\n", width, " ", dl->line);
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 /*
1199  * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1200  * which looks like following
1201  * which looks like the following:
1202  *  0000000000415500 <_init>:
1203  *    415500:       sub    $0x8,%rsp
1204  *    415504:       mov    0x2f5ad5(%rip),%rax        # 70afe0 <_DYNAMIC+0x2f8>
1205  *    41550b:       test   %rax,%rax
1206  *    41550e:       je     415515 <_init+0x15>
1207  *    415510:       callq  416e70 <__gmon_start__@plt>
1208  *    415515:       add    $0x8,%rsp
1209  *    415519:       retq
1210  *
1211  * it will be parsed and saved into struct disasm_line as
1212  *  <offset>       <name>  <ops.raw>
1213  *
1214  * The offset will be a relative offset from the start of the symbol and -1
1215  * means that it's not a disassembly line, so it should be treated differently.
1216  * The ops.raw part will be parsed further according to the type of the instruction.
1217  */
1218 static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
1219 				      struct arch *arch,
1220 				      FILE *file, size_t privsize,
1221 				      int *line_nr)
1222 {
1223 	struct annotation *notes = symbol__annotation(sym);
1224 	struct disasm_line *dl;
1225 	char *line = NULL, *parsed_line, *tmp, *tmp2;
1226 	size_t line_len;
1227 	s64 line_ip, offset = -1;
1228 	regmatch_t match[2];
1229 
1230 	if (getline(&line, &line_len, file) < 0)
1231 		return -1;
1232 
1233 	if (!line)
1234 		return -1;
1235 
1236 	line_ip = -1;
1237 	parsed_line = rtrim(line);
1238 
1239 	/* /filename:linenr ? Save line number and ignore. */
1240 	if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
1241 		*line_nr = atoi(parsed_line + match[1].rm_so);
1242 		return 0;
1243 	}
1244 
1245 	tmp = ltrim(parsed_line);
1246 	if (*tmp) {
1247 		/*
1248 		 * Parse hexadecimal addresses followed by ':'
1249 		 */
1250 		line_ip = strtoull(tmp, &tmp2, 16);
1251 		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
1252 			line_ip = -1;
1253 	}
1254 
1255 	if (line_ip != -1) {
1256 		u64 start = map__rip_2objdump(map, sym->start),
1257 		    end = map__rip_2objdump(map, sym->end);
1258 
1259 		offset = line_ip - start;
1260 		if ((u64)line_ip < start || (u64)line_ip >= end)
1261 			offset = -1;
1262 		else
1263 			parsed_line = tmp2 + 1;
1264 	}
1265 
1266 	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
1267 	free(line);
1268 	(*line_nr)++;
1269 
1270 	if (dl == NULL)
1271 		return -1;
1272 
1273 	if (!disasm_line__has_offset(dl)) {
1274 		dl->ops.target.offset = dl->ops.target.addr -
1275 					map__rip_2objdump(map, sym->start);
1276 		dl->ops.target.offset_avail = true;
1277 	}
1278 
1279 	/* kcore has no symbols, so add the call target name */
1280 	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
1281 		struct addr_map_symbol target = {
1282 			.map = map,
1283 			.addr = dl->ops.target.addr,
1284 		};
1285 
1286 		if (!map_groups__find_ams(&target) &&
1287 		    target.sym->start == target.al_addr)
1288 			dl->ops.target.name = strdup(target.sym->name);
1289 	}
1290 
1291 	disasm__add(&notes->src->source, dl);
1292 
1293 	return 0;
1294 }
1295 
1296 static __attribute__((constructor)) void symbol__init_regexpr(void)
1297 {
1298 	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
1299 }
1300 
1301 static void delete_last_nop(struct symbol *sym)
1302 {
1303 	struct annotation *notes = symbol__annotation(sym);
1304 	struct list_head *list = &notes->src->source;
1305 	struct disasm_line *dl;
1306 
1307 	while (!list_empty(list)) {
1308 		dl = list_entry(list->prev, struct disasm_line, node);
1309 
1310 		if (dl->ins.ops) {
1311 			if (dl->ins.ops != &nop_ops)
1312 				return;
1313 		} else {
1314 			if (!strstr(dl->line, " nop ") &&
1315 			    !strstr(dl->line, " nopl ") &&
1316 			    !strstr(dl->line, " nopw "))
1317 				return;
1318 		}
1319 
1320 		list_del(&dl->node);
1321 		disasm_line__free(dl);
1322 	}
1323 }
1324 
1325 int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
1326 			      int errnum, char *buf, size_t buflen)
1327 {
1328 	struct dso *dso = map->dso;
1329 
1330 	BUG_ON(buflen == 0);
1331 
1332 	if (errnum >= 0) {
1333 		str_error_r(errnum, buf, buflen);
1334 		return 0;
1335 	}
1336 
1337 	switch (errnum) {
1338 	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
1339 		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
1340 		char *build_id_msg = NULL;
1341 
1342 		if (dso->has_build_id) {
1343 			build_id__sprintf(dso->build_id,
1344 					  sizeof(dso->build_id), bf + 15);
1345 			build_id_msg = bf;
1346 		}
1347 		scnprintf(buf, buflen,
1348 			  "No vmlinux file%s\nwas found in the path.\n\n"
1349 			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
1350 			  "Please use:\n\n"
1351 			  "  perf buildid-cache -vu vmlinux\n\n"
1352 			  "or:\n\n"
1353 			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
1354 	}
1355 		break;
1356 	default:
1357 		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
1358 		break;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
1365 {
1366 	char linkname[PATH_MAX];
1367 	char *build_id_filename;
1368 	char *build_id_path = NULL;
1369 	char *pos;
1370 
1371 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1372 	    !dso__is_kcore(dso))
1373 		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
1374 
1375 	build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
1376 	if (build_id_filename) {
1377 		__symbol__join_symfs(filename, filename_size, build_id_filename);
1378 		free(build_id_filename);
1379 	} else {
1380 		if (dso->has_build_id)
1381 			return ENOMEM;
1382 		goto fallback;
1383 	}
1384 
1385 	build_id_path = strdup(filename);
1386 	if (!build_id_path)
1387 		return -1;
1388 
1389 	/*
1390 	 * The old style build-id cache has a name of the form XX/XXXXXXX.., while
1391 	 * the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
1392 	 * Extract the build-id part of the dirname in the new style only.
1393 	 */
1394 	pos = strrchr(build_id_path, '/');
1395 	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
1396 		dirname(build_id_path);
1397 
1398 	if (dso__is_kcore(dso) ||
1399 	    readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
1400 	    strstr(linkname, DSO__NAME_KALLSYMS) ||
1401 	    access(filename, R_OK)) {
1402 fallback:
1403 		/*
1404 		 * If we don't have build-ids or the build-id file isn't in the
1405 		 * cache, or is just a kallsyms file, well, let's hope that this
1406 		 * DSO is the same as when 'perf record' ran.
1407 		 */
1408 		__symbol__join_symfs(filename, filename_size, dso->long_name);
1409 	}
1410 
1411 	free(build_id_path);
1412 	return 0;
1413 }
1414 
1415 static const char *annotate__norm_arch(const char *arch_name)
1416 {
1417 	struct utsname uts;
1418 
1419 	if (!arch_name) { /* Assume we are annotating locally. */
1420 		if (uname(&uts) < 0)
1421 			return NULL;
1422 		arch_name = uts.machine;
1423 	}
1424 	return normalize_arch((char *)arch_name);
1425 }
1426 
1427 int symbol__disassemble(struct symbol *sym, struct map *map,
1428 			const char *arch_name, size_t privsize,
1429 			struct arch **parch, char *cpuid)
1430 {
1431 	struct dso *dso = map->dso;
1432 	char command[PATH_MAX * 2];
1433 	struct arch *arch = NULL;
1434 	FILE *file;
1435 	char symfs_filename[PATH_MAX];
1436 	struct kcore_extract kce;
1437 	bool delete_extract = false;
1438 	int stdout_fd[2];
1439 	int lineno = 0;
1440 	int nline;
1441 	pid_t pid;
1442 	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
1443 
1444 	if (err)
1445 		return err;
1446 
1447 	arch_name = annotate__norm_arch(arch_name);
1448 	if (!arch_name)
1449 		return -1;
1450 
1451 	arch = arch__find(arch_name);
1452 	if (arch == NULL)
1453 		return -ENOTSUP;
1454 
1455 	if (parch)
1456 		*parch = arch;
1457 
1458 	if (arch->init) {
1459 		err = arch->init(arch, cpuid);
1460 		if (err) {
1461 			pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
1462 			return err;
1463 		}
1464 	}
1465 
1466 	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
1467 		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
1468 		 map->unmap_ip(map, sym->end));
1469 
1470 	pr_debug("annotating [%p] %30s : [%p] %30s\n",
1471 		 dso, dso->long_name, sym, sym->name);
1472 
1473 	if (dso__is_kcore(dso)) {
1474 		kce.kcore_filename = symfs_filename;
1475 		kce.addr = map__rip_2objdump(map, sym->start);
1476 		kce.offs = sym->start;
1477 		kce.len = sym->end - sym->start;
1478 		if (!kcore_extract__create(&kce)) {
1479 			delete_extract = true;
1480 			strlcpy(symfs_filename, kce.extract_filename,
1481 				sizeof(symfs_filename));
1482 		}
1483 	} else if (dso__needs_decompress(dso)) {
1484 		char tmp[KMOD_DECOMP_LEN];
1485 
1486 		if (dso__decompress_kmodule_path(dso, symfs_filename,
1487 						 tmp, sizeof(tmp)) < 0)
1488 			goto out;
1489 
1490 		strcpy(symfs_filename, tmp);
1491 	}
1492 
1493 	snprintf(command, sizeof(command),
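	/*
	 * Build the objdump pipeline; e.g. with source interleaving on and raw
	 * bytes suppressed it ends up looking roughly like (illustrative):
	 *
	 *   objdump --start-address=0x<start> --stop-address=0x<end> \
	 *     -l -d --no-show-raw -S -C "<symfs_filename>" 2>/dev/null |
	 *     grep -v "<symfs_filename>:" | expand
	 */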
1494 		 "%s %s%s --start-address=0x%016" PRIx64
1495 		 " --stop-address=0x%016" PRIx64
1496 		 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1497 		 objdump_path ? objdump_path : "objdump",
1498 		 disassembler_style ? "-M " : "",
1499 		 disassembler_style ? disassembler_style : "",
1500 		 map__rip_2objdump(map, sym->start),
1501 		 map__rip_2objdump(map, sym->end),
1502 		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
1503 		 symbol_conf.annotate_src ? "-S" : "",
1504 		 symfs_filename, symfs_filename);
1505 
1506 	pr_debug("Executing: %s\n", command);
1507 
1508 	err = -1;
1509 	if (pipe(stdout_fd) < 0) {
1510 		pr_err("Failure creating the pipe to run %s\n", command);
1511 		goto out_remove_tmp;
1512 	}
1513 
1514 	pid = fork();
1515 	if (pid < 0) {
1516 		pr_err("Failure forking to run %s\n", command);
1517 		goto out_close_stdout;
1518 	}
1519 
1520 	if (pid == 0) {
1521 		close(stdout_fd[0]);
1522 		dup2(stdout_fd[1], 1);
1523 		close(stdout_fd[1]);
1524 		execl("/bin/sh", "sh", "-c", command, NULL);
1525 		perror(command);
1526 		exit(-1);
1527 	}
1528 
1529 	close(stdout_fd[1]);
1530 
1531 	file = fdopen(stdout_fd[0], "r");
1532 	if (!file) {
1533 		pr_err("Failure creating FILE stream for %s\n", command);
1534 		/*
1535 		 * If we were using debug info we should retry with the
1536 		 * original binary.
1537 		 */
1538 		goto out_remove_tmp;
1539 	}
1540 
1541 	nline = 0;
1542 	while (!feof(file)) {
1543 		/*
1544 		 * The source code line number (lineno) needs to be kept
1545 		 * across calls to symbol__parse_objdump_line(), so that it
1546 		 * can associate it with the instructions until the next one.
1547 		 * See disasm_line__new() and struct disasm_line::line_nr.
1548 		 */
1549 		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
1550 			    &lineno) < 0)
1551 			break;
1552 		nline++;
1553 	}
1554 
1555 	if (nline == 0)
1556 		pr_err("No output from %s\n", command);
1557 
1558 	/*
1559 	 * kallsyms does not have symbol sizes, so there may be a nop at the end.
1560 	 * Remove it.
1561 	 */
1562 	if (dso__is_kcore(dso))
1563 		delete_last_nop(sym);
1564 
1565 	fclose(file);
1566 	err = 0;
1567 out_remove_tmp:
1568 	close(stdout_fd[0]);
1569 
1570 	if (dso__needs_decompress(dso))
1571 		unlink(symfs_filename);
1572 
1573 	if (delete_extract)
1574 		kcore_extract__delete(&kce);
1575 out:
1576 	return err;
1577 
1578 out_close_stdout:
1579 	close(stdout_fd[1]);
1580 	goto out_remove_tmp;
1581 }
1582 
1583 static void insert_source_line(struct rb_root *root, struct source_line *src_line)
1584 {
1585 	struct source_line *iter;
1586 	struct rb_node **p = &root->rb_node;
1587 	struct rb_node *parent = NULL;
1588 	int i, ret;
1589 
1590 	while (*p != NULL) {
1591 		parent = *p;
1592 		iter = rb_entry(parent, struct source_line, node);
1593 
1594 		ret = strcmp(iter->path, src_line->path);
1595 		if (ret == 0) {
1596 			for (i = 0; i < src_line->nr_pcnt; i++)
1597 				iter->samples[i].percent_sum += src_line->samples[i].percent;
1598 			return;
1599 		}
1600 
1601 		if (ret < 0)
1602 			p = &(*p)->rb_left;
1603 		else
1604 			p = &(*p)->rb_right;
1605 	}
1606 
1607 	for (i = 0; i < src_line->nr_pcnt; i++)
1608 		src_line->samples[i].percent_sum = src_line->samples[i].percent;
1609 
1610 	rb_link_node(&src_line->node, parent, p);
1611 	rb_insert_color(&src_line->node, root);
1612 }
1613 
1614 static int cmp_source_line(struct source_line *a, struct source_line *b)
1615 {
1616 	int i;
1617 
1618 	for (i = 0; i < a->nr_pcnt; i++) {
1619 		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
1620 			continue;
1621 		return a->samples[i].percent_sum > b->samples[i].percent_sum;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
1628 {
1629 	struct source_line *iter;
1630 	struct rb_node **p = &root->rb_node;
1631 	struct rb_node *parent = NULL;
1632 
1633 	while (*p != NULL) {
1634 		parent = *p;
1635 		iter = rb_entry(parent, struct source_line, node);
1636 
1637 		if (cmp_source_line(src_line, iter))
1638 			p = &(*p)->rb_left;
1639 		else
1640 			p = &(*p)->rb_right;
1641 	}
1642 
1643 	rb_link_node(&src_line->node, parent, p);
1644 	rb_insert_color(&src_line->node, root);
1645 }
1646 
1647 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1648 {
1649 	struct source_line *src_line;
1650 	struct rb_node *node;
1651 
1652 	node = rb_first(src_root);
1653 	while (node) {
1654 		struct rb_node *next;
1655 
1656 		src_line = rb_entry(node, struct source_line, node);
1657 		next = rb_next(node);
1658 		rb_erase(node, src_root);
1659 
1660 		__resort_source_line(dest_root, src_line);
1661 		node = next;
1662 	}
1663 }
1664 
1665 static void symbol__free_source_line(struct symbol *sym, int len)
1666 {
1667 	struct annotation *notes = symbol__annotation(sym);
1668 	struct source_line *src_line = notes->src->lines;
1669 	size_t sizeof_src_line;
1670 	int i;
1671 
1672 	sizeof_src_line = sizeof(*src_line) +
1673 			  (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
1674 
1675 	for (i = 0; i < len; i++) {
1676 		free_srcline(src_line->path);
1677 		src_line = (void *)src_line + sizeof_src_line;
1678 	}
1679 
1680 	zfree(&notes->src->lines);
1681 }
1682 
1683 /* Get the filename:line for the colored entries */
1684 static int symbol__get_source_line(struct symbol *sym, struct map *map,
1685 				   struct perf_evsel *evsel,
1686 				   struct rb_root *root, int len)
1687 {
1688 	u64 start;
1689 	int i, k;
1690 	int evidx = evsel->idx;
1691 	struct source_line *src_line;
1692 	struct annotation *notes = symbol__annotation(sym);
1693 	struct sym_hist *h = annotation__histogram(notes, evidx);
1694 	struct rb_root tmp_root = RB_ROOT;
1695 	int nr_pcnt = 1;
1696 	u64 nr_samples = h->nr_samples;
1697 	size_t sizeof_src_line = sizeof(struct source_line);
1698 
1699 	if (perf_evsel__is_group_event(evsel)) {
1700 		for (i = 1; i < evsel->nr_members; i++) {
1701 			h = annotation__histogram(notes, evidx + i);
1702 			nr_samples += h->nr_samples;
1703 		}
1704 		nr_pcnt = evsel->nr_members;
1705 		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
1706 	}
1707 
1708 	if (!nr_samples)
1709 		return 0;
1710 
1711 	src_line = notes->src->lines = calloc(len, sizeof_src_line);
1712 	if (!notes->src->lines)
1713 		return -1;
1714 
1715 	start = map__rip_2objdump(map, sym->start);
1716 
1717 	for (i = 0; i < len; i++) {
1718 		u64 offset;
1719 		double percent_max = 0.0;
1720 
1721 		src_line->nr_pcnt = nr_pcnt;
1722 
1723 		for (k = 0; k < nr_pcnt; k++) {
1724 			double percent = 0.0;
1725 
1726 			h = annotation__histogram(notes, evidx + k);
1727 			nr_samples = h->addr[i].nr_samples;
1728 			if (h->nr_samples)
1729 				percent = 100.0 * nr_samples / h->nr_samples;
1730 
1731 			if (percent > percent_max)
1732 				percent_max = percent;
1733 			src_line->samples[k].percent = percent;
1734 			src_line->samples[k].nr = nr_samples;
1735 		}
1736 
1737 		if (percent_max <= 0.5)
1738 			goto next;
1739 
1740 		offset = start + i;
1741 		src_line->path = get_srcline(map->dso, offset, NULL,
1742 					     false, true);
1743 		insert_source_line(&tmp_root, src_line);
1744 
1745 	next:
1746 		src_line = (void *)src_line + sizeof_src_line;
1747 	}
1748 
1749 	resort_source_line(root, &tmp_root);
1750 	return 0;
1751 }
1752 
1753 static void print_summary(struct rb_root *root, const char *filename)
1754 {
1755 	struct source_line *src_line;
1756 	struct rb_node *node;
1757 
1758 	printf("\nSorted summary for file %s\n", filename);
1759 	printf("----------------------------------------------\n\n");
1760 
1761 	if (RB_EMPTY_ROOT(root)) {
1762 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1763 		return;
1764 	}
1765 
1766 	node = rb_first(root);
1767 	while (node) {
1768 		double percent, percent_max = 0.0;
1769 		const char *color;
1770 		char *path;
1771 		int i;
1772 
1773 		src_line = rb_entry(node, struct source_line, node);
1774 		for (i = 0; i < src_line->nr_pcnt; i++) {
1775 			percent = src_line->samples[i].percent_sum;
1776 			color = get_percent_color(percent);
1777 			color_fprintf(stdout, color, " %7.2f", percent);
1778 
1779 			if (percent > percent_max)
1780 				percent_max = percent;
1781 		}
1782 
1783 		path = src_line->path;
1784 		color = get_percent_color(percent_max);
1785 		color_fprintf(stdout, color, " %s\n", path);
1786 
1787 		node = rb_next(node);
1788 	}
1789 }
1790 
1791 static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
1792 {
1793 	struct annotation *notes = symbol__annotation(sym);
1794 	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1795 	u64 len = symbol__size(sym), offset;
1796 
1797 	for (offset = 0; offset < len; ++offset)
1798 		if (h->addr[offset].nr_samples != 0)
1799 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1800 			       sym->start + offset, h->addr[offset].nr_samples);
1801 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1802 }
1803 
1804 int symbol__annotate_printf(struct symbol *sym, struct map *map,
1805 			    struct perf_evsel *evsel, bool full_paths,
1806 			    int min_pcnt, int max_lines, int context)
1807 {
1808 	struct dso *dso = map->dso;
1809 	char *filename;
1810 	const char *d_filename;
1811 	const char *evsel_name = perf_evsel__name(evsel);
1812 	struct annotation *notes = symbol__annotation(sym);
1813 	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1814 	struct disasm_line *pos, *queue = NULL;
1815 	u64 start = map__rip_2objdump(map, sym->start);
1816 	int printed = 2, queue_len = 0;
1817 	int more = 0;
1818 	u64 len;
1819 	int width = symbol_conf.show_total_period ? 12 : 8;
1820 	int graph_dotted_len;
1821 
1822 	filename = strdup(dso->long_name);
1823 	if (!filename)
1824 		return -ENOMEM;
1825 
1826 	if (full_paths)
1827 		d_filename = filename;
1828 	else
1829 		d_filename = basename(filename);
1830 
1831 	len = symbol__size(sym);
1832 
1833 	if (perf_evsel__is_group_event(evsel))
1834 		width *= evsel->nr_members;
1835 
1836 	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
1837 				  width, width, symbol_conf.show_total_period ? "Period" :
1838 				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
1839 				  d_filename, evsel_name, h->nr_samples);
1840 
1841 	printf("%-*.*s----\n",
1842 	       graph_dotted_len, graph_dotted_len, graph_dotted_line);
1843 
1844 	if (verbose > 0)
1845 		symbol__annotate_hits(sym, evsel);
1846 
1847 	list_for_each_entry(pos, &notes->src->source, node) {
1848 		if (context && queue == NULL) {
1849 			queue = pos;
1850 			queue_len = 0;
1851 		}
1852 
1853 		switch (disasm_line__print(pos, sym, start, evsel, len,
1854 					    min_pcnt, printed, max_lines,
1855 					    queue)) {
1856 		case 0:
1857 			++printed;
1858 			if (context) {
1859 				printed += queue_len;
1860 				queue = NULL;
1861 				queue_len = 0;
1862 			}
1863 			break;
1864 		case 1:
1865 			/* filtered by max_lines */
1866 			++more;
1867 			break;
1868 		case -1:
1869 		default:
1870 			/*
1871 			 * Filtered by min_pcnt, or a non-IP line when
1872 			 * context != 0
1873 			 */
1874 			if (!context)
1875 				break;
1876 			if (queue_len == context)
1877 				queue = list_entry(queue->node.next, typeof(*queue), node);
1878 			else
1879 				++queue_len;
1880 			break;
1881 		}
1882 	}
1883 
1884 	free(filename);
1885 
1886 	return more;
1887 }
1888 
1889 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1890 {
1891 	struct annotation *notes = symbol__annotation(sym);
1892 	struct sym_hist *h = annotation__histogram(notes, evidx);
1893 
1894 	memset(h, 0, notes->src->sizeof_sym_hist);
1895 }
1896 
1897 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1898 {
1899 	struct annotation *notes = symbol__annotation(sym);
1900 	struct sym_hist *h = annotation__histogram(notes, evidx);
1901 	int len = symbol__size(sym), offset;
1902 
1903 	h->nr_samples = 0;
1904 	for (offset = 0; offset < len; ++offset) {
1905 		h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
1906 		h->nr_samples += h->addr[offset].nr_samples;
1907 	}
1908 }
1909 
1910 void disasm__purge(struct list_head *head)
1911 {
1912 	struct disasm_line *pos, *n;
1913 
1914 	list_for_each_entry_safe(pos, n, head, node) {
1915 		list_del(&pos->node);
1916 		disasm_line__free(pos);
1917 	}
1918 }
1919 
1920 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1921 {
1922 	size_t printed;
1923 
1924 	if (dl->offset == -1)
1925 		return fprintf(fp, "%s\n", dl->line);
1926 
1927 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
1928 
1929 	if (dl->ops.raw[0] != '\0') {
1930 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1931 				   dl->ops.raw);
1932 	}
1933 
1934 	return printed + fprintf(fp, "\n");
1935 }
1936 
1937 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1938 {
1939 	struct disasm_line *pos;
1940 	size_t printed = 0;
1941 
1942 	list_for_each_entry(pos, head, node)
1943 		printed += disasm_line__fprintf(pos, fp);
1944 
1945 	return printed;
1946 }
1947 
1948 int symbol__tty_annotate(struct symbol *sym, struct map *map,
1949 			 struct perf_evsel *evsel, bool print_lines,
1950 			 bool full_paths, int min_pcnt, int max_lines)
1951 {
1952 	struct dso *dso = map->dso;
1953 	struct rb_root source_line = RB_ROOT;
1954 	u64 len;
1955 
1956 	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
1957 				0, NULL, NULL) < 0)
1958 		return -1;
1959 
1960 	len = symbol__size(sym);
1961 
1962 	if (print_lines) {
1963 		srcline_full_filename = full_paths;
1964 		symbol__get_source_line(sym, map, evsel, &source_line, len);
1965 		print_summary(&source_line, dso->long_name);
1966 	}
1967 
1968 	symbol__annotate_printf(sym, map, evsel, full_paths,
1969 				min_pcnt, max_lines, 0);
1970 	if (print_lines)
1971 		symbol__free_source_line(sym, len);
1972 
1973 	disasm__purge(&symbol__annotation(sym)->src->source);
1974 
1975 	return 0;
1976 }
1977 
1978 bool ui__has_annotation(void)
1979 {
1980 	return use_browser == 1 && perf_hpp_list.sym;
1981 }
1982