xref: /openbmc/linux/tools/perf/builtin-top.c (revision 20ff1cb5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * builtin-top.c
4  *
5  * Builtin top command: Display a continuously updated profile of
6  * any workload, CPU or specific PID.
7  *
8  * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
9  *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Improvements and fixes by:
12  *
13  *   Arjan van de Ven <arjan@linux.intel.com>
14  *   Yanmin Zhang <yanmin.zhang@intel.com>
15  *   Wu Fengguang <fengguang.wu@intel.com>
16  *   Mike Galbraith <efault@gmx.de>
17  *   Paul Mackerras <paulus@samba.org>
18  */
19 #include "builtin.h"
20 
21 #include "perf.h"
22 
23 #include "util/annotate.h"
24 #include "util/bpf-event.h"
25 #include "util/config.h"
26 #include "util/color.h"
27 #include "util/dso.h"
28 #include "util/evlist.h"
29 #include "util/evsel.h"
30 #include "util/event.h"
31 #include "util/machine.h"
32 #include "util/map.h"
33 #include "util/session.h"
34 #include "util/symbol.h"
35 #include "util/top.h"
36 #include "util/util.h"
37 #include <linux/rbtree.h>
38 #include <subcmd/parse-options.h>
39 #include "util/parse-events.h"
40 #include "util/callchain.h"
41 #include "util/cpumap.h"
42 #include "util/sort.h"
43 #include "util/string2.h"
44 #include "util/term.h"
45 #include "util/intlist.h"
46 #include "util/parse-branch-options.h"
47 #include "arch/common.h"
48 #include "ui/ui.h"
49 
50 #include "util/debug.h"
51 #include "util/ordered-events.h"
52 
53 #include <assert.h>
54 #include <elf.h>
55 #include <fcntl.h>
56 
57 #include <stdio.h>
58 #include <termios.h>
59 #include <unistd.h>
60 #include <inttypes.h>
61 
62 #include <errno.h>
63 #include <time.h>
64 #include <sched.h>
65 #include <signal.h>
66 
67 #include <sys/syscall.h>
68 #include <sys/ioctl.h>
69 #include <poll.h>
70 #include <sys/prctl.h>
71 #include <sys/wait.h>
72 #include <sys/uio.h>
73 #include <sys/utsname.h>
74 #include <sys/mman.h>
75 
76 #include <linux/stringify.h>
77 #include <linux/time64.h>
78 #include <linux/types.h>
79 
80 #include <linux/ctype.h>
81 
82 static volatile int done;
83 static volatile int resize;
84 
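/*
 * Terminal rows reserved for the header block printed above the symbol
 * table; perf_top__update_print_entries() sizes the entry list to the
 * rows that remain.
 */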
85 #define HEADER_LINE_NR  5
86 
87 static void perf_top__update_print_entries(struct perf_top *top)
88 {
89 	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
90 }
91 
92 static void winch_sig(int sig __maybe_unused)
93 {
94 	resize = 1;
95 }
96 
97 static void perf_top__resize(struct perf_top *top)
98 {
99 	get_term_dimensions(&top->winsize);
100 	perf_top__update_print_entries(top);
101 }
102 
103 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
104 {
105 	struct evsel *evsel;
106 	struct symbol *sym;
107 	struct annotation *notes;
108 	struct map *map;
109 	int err = -1;
110 
111 	if (!he || !he->ms.sym)
112 		return -1;
113 
114 	evsel = hists_to_evsel(he->hists);
115 
116 	sym = he->ms.sym;
117 	map = he->ms.map;
118 
119 	/*
120 	 * We can't annotate with just /proc/kallsyms
121 	 */
122 	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
123 	    !dso__is_kcore(map->dso)) {
124 		pr_err("Can't annotate %s: No vmlinux file was found in the "
125 		       "path\n", sym->name);
126 		sleep(1);
127 		return -1;
128 	}
129 
130 	notes = symbol__annotation(sym);
131 	pthread_mutex_lock(&notes->lock);
132 
133 	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
134 		pthread_mutex_unlock(&notes->lock);
135 		pr_err("Not enough memory for annotating '%s' symbol!\n",
136 		       sym->name);
137 		sleep(1);
138 		return err;
139 	}
140 
141 	err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
142 	if (err == 0) {
143 		top->sym_filter_entry = he;
144 	} else {
145 		char msg[BUFSIZ];
146 		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
147 		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
148 	}
149 
150 	pthread_mutex_unlock(&notes->lock);
151 	return err;
152 }
153 
154 static void __zero_source_counters(struct hist_entry *he)
155 {
156 	struct symbol *sym = he->ms.sym;
157 	symbol__annotate_zero_histograms(sym);
158 }
159 
160 static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
161 {
162 	struct utsname uts;
163 	int err = uname(&uts);
164 
165 	ui__warning("Out of bounds address found:\n\n"
166 		    "Addr:   %" PRIx64 "\n"
167 		    "DSO:    %s %c\n"
168 		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
169 		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
170 		    "Arch:   %s\n"
171 		    "Kernel: %s\n"
172 		    "Tools:  %s\n\n"
173 		    "Not all samples will be in the annotation output.\n\n"
174 		    "Please report to linux-kernel@vger.kernel.org\n",
175 		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
176 		    map->start, map->end, sym->start, sym->end,
177 		    sym->binding == STB_GLOBAL ? 'g' :
178 		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
179 		    err ? "[unknown]" : uts.machine,
180 		    err ? "[unknown]" : uts.release, perf_version_string);
181 	if (use_browser <= 0)
182 		sleep(5);
183 
184 	map->erange_warned = true;
185 }
186 
187 static void perf_top__record_precise_ip(struct perf_top *top,
188 					struct hist_entry *he,
189 					struct perf_sample *sample,
190 					struct evsel *evsel, u64 ip)
191 {
192 	struct annotation *notes;
193 	struct symbol *sym = he->ms.sym;
194 	int err = 0;
195 
196 	if (sym == NULL || (use_browser == 0 &&
197 			    (top->sym_filter_entry == NULL ||
198 			     top->sym_filter_entry->ms.sym != sym)))
199 		return;
200 
201 	notes = symbol__annotation(sym);
202 
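	/*
	 * This runs in the event processing path: if the display thread
	 * currently holds the annotation lock, skip this sample's
	 * histogram update rather than block.
	 */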
203 	if (pthread_mutex_trylock(&notes->lock))
204 		return;
205 
206 	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
207 
208 	pthread_mutex_unlock(&notes->lock);
209 
210 	if (unlikely(err)) {
211 		/*
212 		 * This function is now called with he->hists->lock held.
213 		 * Release it before going to sleep.
214 		 */
215 		pthread_mutex_unlock(&he->hists->lock);
216 
217 		if (err == -ERANGE && !he->ms.map->erange_warned)
218 			ui__warn_map_erange(he->ms.map, sym, ip);
219 		else if (err == -ENOMEM) {
220 			pr_err("Not enough memory for annotating '%s' symbol!\n",
221 			       sym->name);
222 			sleep(1);
223 		}
224 
225 		pthread_mutex_lock(&he->hists->lock);
226 	}
227 }
228 
229 static void perf_top__show_details(struct perf_top *top)
230 {
231 	struct hist_entry *he = top->sym_filter_entry;
232 	struct evsel *evsel;
233 	struct annotation *notes;
234 	struct symbol *symbol;
235 	int more;
236 
237 	if (!he)
238 		return;
239 
240 	evsel = hists_to_evsel(he->hists);
241 
242 	symbol = he->ms.sym;
243 	notes = symbol__annotation(symbol);
244 
245 	pthread_mutex_lock(&notes->lock);
246 
247 	symbol__calc_percent(symbol, evsel);
248 
249 	if (notes->src == NULL)
250 		goto out_unlock;
251 
252 	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
253 	printf("  Events  Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
254 
255 	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
256 
257 	if (top->evlist->enabled) {
258 		if (top->zero)
259 			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
260 		else
261 			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
262 	}
263 	if (more != 0)
264 		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
265 out_unlock:
266 	pthread_mutex_unlock(&notes->lock);
267 }
268 
269 static void perf_top__resort_hists(struct perf_top *t)
270 {
271 	struct evlist *evlist = t->evlist;
272 	struct evsel *pos;
273 
274 	evlist__for_each_entry(evlist, pos) {
275 		struct hists *hists = evsel__hists(pos);
276 
277 		/*
278 		 * unlink existing entries so that they can be linked
279 		 * in a correct order in hists__match() below.
280 		 */
281 		hists__unlink(hists);
282 
283 		if (evlist->enabled) {
284 			if (t->zero) {
285 				hists__delete_entries(hists);
286 			} else {
287 				hists__decay_entries(hists, t->hide_user_symbols,
288 						     t->hide_kernel_symbols);
289 			}
290 		}
291 
292 		hists__collapse_resort(hists, NULL);
293 
294 		/* Non-group events are considered as leaders */
295 		if (symbol_conf.event_group &&
296 		    !perf_evsel__is_group_leader(pos)) {
297 			struct hists *leader_hists = evsel__hists(pos->leader);
298 
299 			hists__match(leader_hists, hists);
300 			hists__link(leader_hists, hists);
301 		}
302 	}
303 
304 	evlist__for_each_entry(evlist, pos) {
305 		perf_evsel__output_resort(pos, NULL);
306 	}
307 }
308 
309 static void perf_top__print_sym_table(struct perf_top *top)
310 {
311 	char bf[160];
312 	int printed = 0;
313 	const int win_width = top->winsize.ws_col - 1;
314 	struct evsel *evsel = top->sym_evsel;
315 	struct hists *hists = evsel__hists(evsel);
316 
317 	puts(CONSOLE_CLEAR);
318 
319 	perf_top__header_snprintf(top, bf, sizeof(bf));
320 	printf("%s\n", bf);
321 
322 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
323 
324 	if (!top->record_opts.overwrite &&
325 	    (hists->stats.nr_lost_warned !=
326 	    hists->stats.nr_events[PERF_RECORD_LOST])) {
327 		hists->stats.nr_lost_warned =
328 			      hists->stats.nr_events[PERF_RECORD_LOST];
329 		color_fprintf(stdout, PERF_COLOR_RED,
330 			      "WARNING: LOST %d chunks, Check IO/CPU overload",
331 			      hists->stats.nr_lost_warned);
332 		++printed;
333 	}
334 
335 	if (top->sym_filter_entry) {
336 		perf_top__show_details(top);
337 		return;
338 	}
339 
340 	perf_top__resort_hists(top);
341 
342 	hists__output_recalc_col_len(hists, top->print_entries - printed);
343 	putchar('\n');
344 	hists__fprintf(hists, false, top->print_entries - printed, win_width,
345 		       top->min_percent, stdout, !symbol_conf.use_callchain);
346 }
347 
348 static void prompt_integer(int *target, const char *msg)
349 {
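	/*
	 * Start with an empty allocation: getline() grows the buffer as
	 * needed, and it is freed at out_free below.
	 */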
350 	char *buf = malloc(0), *p;
351 	size_t dummy = 0;
352 	int tmp;
353 
354 	fprintf(stdout, "\n%s: ", msg);
355 	if (getline(&buf, &dummy, stdin) < 0)
356 		goto out_free;
357 
358 	p = strchr(buf, '\n');
359 	if (p)
360 		*p = 0;
361 
362 	p = buf;
363 	while(*p) {
364 		if (!isdigit(*p))
365 			goto out_free;
366 		p++;
367 	}
368 	tmp = strtoul(buf, NULL, 10);
369 	*target = tmp;
370 out_free:
371 	free(buf);
372 }
373 
374 static void prompt_percent(int *target, const char *msg)
375 {
376 	int tmp = 0;
377 
378 	prompt_integer(&tmp, msg);
379 	if (tmp >= 0 && tmp <= 100)
380 		*target = tmp;
381 }
382 
383 static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
384 {
385 	char *buf = malloc(0), *p;
386 	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
387 	struct hists *hists = evsel__hists(top->sym_evsel);
388 	struct rb_node *next;
389 	size_t dummy = 0;
390 
391 	/* zero counters of active symbol */
392 	if (syme) {
393 		__zero_source_counters(syme);
394 		top->sym_filter_entry = NULL;
395 	}
396 
397 	fprintf(stdout, "\n%s: ", msg);
398 	if (getline(&buf, &dummy, stdin) < 0)
399 		goto out_free;
400 
401 	p = strchr(buf, '\n');
402 	if (p)
403 		*p = 0;
404 
405 	next = rb_first_cached(&hists->entries);
406 	while (next) {
407 		n = rb_entry(next, struct hist_entry, rb_node);
408 		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
409 			found = n;
410 			break;
411 		}
412 		next = rb_next(&n->rb_node);
413 	}
414 
415 	if (!found) {
416 		fprintf(stderr, "Sorry, %s is not active.\n", buf);
417 		sleep(1);
418 	} else
419 		perf_top__parse_source(top, found);
420 
421 out_free:
422 	free(buf);
423 }
424 
425 static void perf_top__print_mapped_keys(struct perf_top *top)
426 {
427 	char *name = NULL;
428 
429 	if (top->sym_filter_entry) {
430 		struct symbol *sym = top->sym_filter_entry->ms.sym;
431 		name = sym->name;
432 	}
433 
434 	fprintf(stdout, "\nMapped keys:\n");
435 	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
436 	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
437 
438 	if (top->evlist->core.nr_entries > 1)
439 		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));
440 
441 	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
442 
443 	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
444 	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
445 	fprintf(stdout, "\t[S]     stop annotation.\n");
446 
447 	fprintf(stdout,
448 		"\t[K]     hide kernel symbols.             \t(%s)\n",
449 		top->hide_kernel_symbols ? "yes" : "no");
450 	fprintf(stdout,
451 		"\t[U]     hide user symbols.               \t(%s)\n",
452 		top->hide_user_symbols ? "yes" : "no");
453 	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
454 	fprintf(stdout, "\t[qQ]    quit.\n");
455 }
456 
457 static int perf_top__key_mapped(struct perf_top *top, int c)
458 {
459 	switch (c) {
460 		case 'd':
461 		case 'e':
462 		case 'f':
463 		case 'z':
464 		case 'q':
465 		case 'Q':
466 		case 'K':
467 		case 'U':
468 		case 'F':
469 		case 's':
470 		case 'S':
471 			return 1;
472 		case 'E':
473 			return top->evlist->core.nr_entries > 1 ? 1 : 0;
474 		default:
475 			break;
476 	}
477 
478 	return 0;
479 }
480 
481 static bool perf_top__handle_keypress(struct perf_top *top, int c)
482 {
483 	bool ret = true;
484 
485 	if (!perf_top__key_mapped(top, c)) {
486 		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
487 		struct termios save;
488 
489 		perf_top__print_mapped_keys(top);
490 		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
491 		fflush(stdout);
492 
493 		set_term_quiet_input(&save);
494 
495 		poll(&stdin_poll, 1, -1);
496 		c = getc(stdin);
497 
498 		tcsetattr(0, TCSAFLUSH, &save);
499 		if (!perf_top__key_mapped(top, c))
500 			return ret;
501 	}
502 
503 	switch (c) {
504 		case 'd':
505 			prompt_integer(&top->delay_secs, "Enter display delay");
506 			if (top->delay_secs < 1)
507 				top->delay_secs = 1;
508 			break;
509 		case 'e':
510 			prompt_integer(&top->print_entries, "Enter display entries (lines)");
511 			if (top->print_entries == 0) {
512 				perf_top__resize(top);
513 				signal(SIGWINCH, winch_sig);
514 			} else {
515 				signal(SIGWINCH, SIG_DFL);
516 			}
517 			break;
518 		case 'E':
519 			if (top->evlist->core.nr_entries > 1) {
520 				/* Select 0 as the default event: */
521 				int counter = 0;
522 
523 				fprintf(stderr, "\nAvailable events:");
524 
525 				evlist__for_each_entry(top->evlist, top->sym_evsel)
526 					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
527 
528 				prompt_integer(&counter, "Enter details event counter");
529 
530 				if (counter >= top->evlist->core.nr_entries) {
531 					top->sym_evsel = perf_evlist__first(top->evlist);
532 					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
533 					sleep(1);
534 					break;
535 				}
536 				evlist__for_each_entry(top->evlist, top->sym_evsel)
537 					if (top->sym_evsel->idx == counter)
538 						break;
539 			} else
540 				top->sym_evsel = perf_evlist__first(top->evlist);
541 			break;
542 		case 'f':
543 			prompt_integer(&top->count_filter, "Enter display event count filter");
544 			break;
545 		case 'F':
546 			prompt_percent(&top->annotation_opts.min_pcnt,
547 				       "Enter details display event filter (percent)");
548 			break;
549 		case 'K':
550 			top->hide_kernel_symbols = !top->hide_kernel_symbols;
551 			break;
552 		case 'q':
553 		case 'Q':
554 			printf("exiting.\n");
555 			if (top->dump_symtab)
556 				perf_session__fprintf_dsos(top->session, stderr);
557 			ret = false;
558 			break;
559 		case 's':
560 			perf_top__prompt_symbol(top, "Enter details symbol");
561 			break;
562 		case 'S':
563 			if (!top->sym_filter_entry)
564 				break;
565 			else {
566 				struct hist_entry *syme = top->sym_filter_entry;
567 
568 				top->sym_filter_entry = NULL;
569 				__zero_source_counters(syme);
570 			}
571 			break;
572 		case 'U':
573 			top->hide_user_symbols = !top->hide_user_symbols;
574 			break;
575 		case 'z':
576 			top->zero = !top->zero;
577 			break;
578 		default:
579 			break;
580 	}
581 
582 	return ret;
583 }
584 
585 static void perf_top__sort_new_samples(void *arg)
586 {
587 	struct perf_top *t = arg;
588 
589 	if (t->evlist->selected != NULL)
590 		t->sym_evsel = t->evlist->selected;
591 
592 	perf_top__resort_hists(t);
593 
594 	if (t->lost || t->drop)
595 		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
596 }
597 
598 static void stop_top(void)
599 {
600 	session_done = 1;
601 	done = 1;
602 }
603 
604 static void *display_thread_tui(void *arg)
605 {
606 	struct evsel *pos;
607 	struct perf_top *top = arg;
608 	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
609 	struct hist_browser_timer hbt = {
610 		.timer		= perf_top__sort_new_samples,
611 		.arg		= top,
612 		.refresh	= top->delay_secs,
613 	};
614 
615 	/* In order to read symbols from other namespaces perf needs to call
616 	 * setns(2).  This isn't permitted if the struct fs has multiple users,
617 	 * so unshare(2) the fs so that we may continue to setns into namespaces
618 	 * that we're observing.
619 	 */
620 	unshare(CLONE_FS);
621 
622 	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
623 
624 	perf_top__sort_new_samples(top);
625 
626 	/*
627 	 * Initialize the uid_filter_str; in the future the TUI will allow
628 	 * zooming in/out on UIDs. For now just use whatever the user passed
629 	 * via --uid.
630 	 */
631 	evlist__for_each_entry(top->evlist, pos) {
632 		struct hists *hists = evsel__hists(pos);
633 		hists->uid_filter_str = top->record_opts.target.uid_str;
634 	}
635 
636 	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
637 				      top->min_percent,
638 				      &top->session->header.env,
639 				      !top->record_opts.overwrite,
640 				      &top->annotation_opts);
641 
642 	stop_top();
643 	return NULL;
644 }
645 
646 static void display_sig(int sig __maybe_unused)
647 {
648 	stop_top();
649 }
650 
651 static void display_setup_sig(void)
652 {
653 	signal(SIGSEGV, sighandler_dump_stack);
654 	signal(SIGFPE, sighandler_dump_stack);
655 	signal(SIGINT,  display_sig);
656 	signal(SIGQUIT, display_sig);
657 	signal(SIGTERM, display_sig);
658 }
659 
660 static void *display_thread(void *arg)
661 {
662 	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
663 	struct termios save;
664 	struct perf_top *top = arg;
665 	int delay_msecs, c;
666 
667 	/* In order to read symbols from other namespaces perf needs to call
668 	 * setns(2).  This isn't permitted if the struct fs has multiple users,
669 	 * so unshare(2) the fs so that we may continue to setns into namespaces
670 	 * that we're observing.
671 	 */
672 	unshare(CLONE_FS);
673 
674 	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
675 
676 	display_setup_sig();
677 	pthread__unblock_sigwinch();
678 repeat:
679 	delay_msecs = top->delay_secs * MSEC_PER_SEC;
680 	set_term_quiet_input(&save);
681 	/* discard the pending newline */
682 	getc(stdin);
683 
684 	while (!done) {
685 		perf_top__print_sym_table(top);
686 		/*
687 		 * Either timeout expired or we got an EINTR due to SIGWINCH,
688 		 * refresh screen in both cases.
689 		 */
690 		switch (poll(&stdin_poll, 1, delay_msecs)) {
691 		case 0:
692 			continue;
693 		case -1:
694 			if (errno == EINTR)
695 				continue;
696 			__fallthrough;
697 		default:
698 			c = getc(stdin);
699 			tcsetattr(0, TCSAFLUSH, &save);
700 
701 			if (perf_top__handle_keypress(top, c))
702 				goto repeat;
703 			stop_top();
704 		}
705 	}
706 
707 	tcsetattr(0, TCSAFLUSH, &save);
708 	return NULL;
709 }
710 
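/*
 * Per-entry callback for hist_entry_iter__add(); 'single' is true only
 * for the sample's own entry, not for any cumulative callchain parents,
 * so the precise-IP annotation is recorded once per sample.
 */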
711 static int hist_iter__top_callback(struct hist_entry_iter *iter,
712 				   struct addr_location *al, bool single,
713 				   void *arg)
714 {
715 	struct perf_top *top = arg;
716 	struct hist_entry *he = iter->he;
717 	struct evsel *evsel = iter->evsel;
718 
719 	if (perf_hpp_list.sym && single)
720 		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
721 
722 	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
723 		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
724 	return 0;
725 }
726 
727 static void perf_event__process_sample(struct perf_tool *tool,
728 				       const union perf_event *event,
729 				       struct evsel *evsel,
730 				       struct perf_sample *sample,
731 				       struct machine *machine)
732 {
733 	struct perf_top *top = container_of(tool, struct perf_top, tool);
734 	struct addr_location al;
735 	int err;
736 
737 	if (!machine && perf_guest) {
738 		static struct intlist *seen;
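		/* guest pids we already warned about, to warn only once */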
739 
740 		if (!seen)
741 			seen = intlist__new(NULL);
742 
743 		if (!intlist__has_entry(seen, sample->pid)) {
744 			pr_err("Can't find guest [%d]'s kernel information\n",
745 				sample->pid);
746 			intlist__add(seen, sample->pid);
747 		}
748 		return;
749 	}
750 
751 	if (!machine) {
752 		pr_err("%u unprocessable samples recorded.\r",
753 		       top->session->evlist->stats.nr_unprocessable_samples++);
754 		return;
755 	}
756 
757 	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
758 		top->exact_samples++;
759 
760 	if (machine__resolve(machine, &al, sample) < 0)
761 		return;
762 
763 	if (!machine->kptr_restrict_warned &&
764 	    symbol_conf.kptr_restrict &&
765 	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
766 		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
767 			ui__warning(
768 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
769 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
770 "Kernel%s samples will not be resolved.\n",
771 			  al.map && map__has_symbols(al.map) ?
772 			  " modules" : "");
773 			if (use_browser <= 0)
774 				sleep(5);
775 		}
776 		machine->kptr_restrict_warned = true;
777 	}
778 
779 	if (al.sym == NULL && al.map != NULL) {
780 		const char *msg = "Kernel samples will not be resolved.\n";
781 		/*
782 		 * As we do lazy loading of symtabs we only will know if the
783 		 * specified vmlinux file is invalid when we actually have a
784 		 * hit in kernel space and then try to load it. So if we get
785 		 * here and there are _no_ symbols in the DSO backing the
786 		 * kernel map, bail out.
787 		 *
788 		 * We may never get here, for instance, if we use -K/
789 		 * --hide-kernel-symbols, even if the user specifies an
790 		 * invalid --vmlinux ;-)
791 		 */
792 		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
793 		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
794 			if (symbol_conf.vmlinux_name) {
795 				char serr[256];
796 				dso__strerror_load(al.map->dso, serr, sizeof(serr));
797 				ui__warning("The %s file can't be used: %s\n%s",
798 					    symbol_conf.vmlinux_name, serr, msg);
799 			} else {
800 				ui__warning("A vmlinux file was not found.\n%s",
801 					    msg);
802 			}
803 
804 			if (use_browser <= 0)
805 				sleep(5);
806 			top->vmlinux_warned = true;
807 		}
808 	}
809 
810 	if (al.sym == NULL || !al.sym->idle) {
811 		struct hists *hists = evsel__hists(evsel);
812 		struct hist_entry_iter iter = {
813 			.evsel		= evsel,
814 			.sample 	= sample,
815 			.add_entry_cb 	= hist_iter__top_callback,
816 		};
817 
818 		if (symbol_conf.cumulate_callchain)
819 			iter.ops = &hist_iter_cumulative;
820 		else
821 			iter.ops = &hist_iter_normal;
822 
823 		pthread_mutex_lock(&hists->lock);
824 
825 		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
826 		if (err < 0)
827 			pr_err("Problem incrementing symbol period, skipping event\n");
828 
829 		pthread_mutex_unlock(&hists->lock);
830 	}
831 
832 	addr_location__put(&al);
833 }
834 
835 static void
836 perf_top__process_lost(struct perf_top *top, union perf_event *event,
837 		       struct evsel *evsel)
838 {
839 	struct hists *hists = evsel__hists(evsel);
840 
841 	top->lost += event->lost.lost;
842 	top->lost_total += event->lost.lost;
843 	hists->stats.total_lost += event->lost.lost;
844 }
845 
846 static void
847 perf_top__process_lost_samples(struct perf_top *top,
848 			       union perf_event *event,
849 			       struct evsel *evsel)
850 {
851 	struct hists *hists = evsel__hists(evsel);
852 
853 	top->lost += event->lost_samples.lost;
854 	top->lost_total += event->lost_samples.lost;
855 	hists->stats.total_lost_samples += event->lost_samples.lost;
856 }
857 
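/*
 * Timestamp of the newest event read from the ring buffers; should_drop()
 * compares queued samples against it to discard those that are more than
 * delay_secs behind.
 */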
858 static u64 last_timestamp;
859 
860 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
861 {
862 	struct record_opts *opts = &top->record_opts;
863 	struct evlist *evlist = top->evlist;
864 	struct perf_mmap *md;
865 	union perf_event *event;
866 
867 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
868 	if (perf_mmap__read_init(md) < 0)
869 		return;
870 
871 	while ((event = perf_mmap__read_event(md)) != NULL) {
872 		int ret;
873 
874 		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
875 		if (ret && ret != -1)
876 			break;
877 
878 		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
879 		if (ret)
880 			break;
881 
882 		perf_mmap__consume(md);
883 
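		/*
		 * The process thread swapped the queues and is blocked on
		 * qe.cond until we acknowledge; clear the flag and wake it
		 * so it can start flushing the other queue.
		 */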
884 		if (top->qe.rotate) {
885 			pthread_mutex_lock(&top->qe.mutex);
886 			top->qe.rotate = false;
887 			pthread_cond_signal(&top->qe.cond);
888 			pthread_mutex_unlock(&top->qe.mutex);
889 		}
890 	}
891 
892 	perf_mmap__read_done(md);
893 }
894 
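/*
 * With the backward (overwrite) ring buffer the kernel keeps overwriting
 * old records, so reads must be bracketed: mark the maps DATA_PENDING to
 * get a stable snapshot, read them all, then flip back through EMPTY to
 * RUNNING so the kernel resumes writing.
 */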
895 static void perf_top__mmap_read(struct perf_top *top)
896 {
897 	bool overwrite = top->record_opts.overwrite;
898 	struct evlist *evlist = top->evlist;
899 	int i;
900 
901 	if (overwrite)
902 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
903 
904 	for (i = 0; i < top->evlist->nr_mmaps; i++)
905 		perf_top__mmap_read_idx(top, i);
906 
907 	if (overwrite) {
908 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
909 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
910 	}
911 }
912 
913 /*
914  * Check the per-event overwrite term.
915  * perf top requires a consistent overwrite setting across all events.
916  * - No event has a per-event term
917  *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
918  *   Nothing changes, return 0.
919  * - All events have the same per-event term
920  *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
921  *   Use the per-event setting to replace opts->overwrite if they
922  *   differ, then return 0.
923  * - Events have different per-event terms
924  *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
925  *   Return -1.
926  * - Some events set a per-event term, but others do not.
927  *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
928  *   Return -1.
929  */
930 static int perf_top__overwrite_check(struct perf_top *top)
931 {
932 	struct record_opts *opts = &top->record_opts;
933 	struct evlist *evlist = top->evlist;
934 	struct perf_evsel_config_term *term;
935 	struct list_head *config_terms;
936 	struct evsel *evsel;
937 	int set, overwrite = -1;
938 
939 	evlist__for_each_entry(evlist, evsel) {
940 		set = -1;
941 		config_terms = &evsel->config_terms;
942 		list_for_each_entry(term, config_terms, list) {
943 			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
944 				set = term->val.overwrite ? 1 : 0;
945 		}
946 
947 		/* no term for either the current or the previous event (likely) */
948 		if ((overwrite < 0) && (set < 0))
949 			continue;
950 
951 		/* has term for both current and previous event, compare */
952 		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
953 			return -1;
954 
955 		/* no term for current event but has term for previous one */
956 		if ((overwrite >= 0) && (set < 0))
957 			return -1;
958 
959 		/* has term for current event */
960 		if ((overwrite < 0) && (set >= 0)) {
961 			/* if it's first event, set overwrite */
962 			if (evsel == perf_evlist__first(evlist))
963 				overwrite = set;
964 			else
965 				return -1;
966 		}
967 	}
968 
969 	if ((overwrite >= 0) && (opts->overwrite != overwrite))
970 		opts->overwrite = overwrite;
971 
972 	return 0;
973 }
974 
975 static int perf_top_overwrite_fallback(struct perf_top *top,
976 				       struct evsel *evsel)
977 {
978 	struct record_opts *opts = &top->record_opts;
979 	struct evlist *evlist = top->evlist;
980 	struct evsel *counter;
981 
982 	if (!opts->overwrite)
983 		return 0;
984 
985 	/* only fall back when first event fails */
986 	if (evsel != perf_evlist__first(evlist))
987 		return 0;
988 
989 	evlist__for_each_entry(evlist, counter)
990 		counter->core.attr.write_backward = false;
991 	opts->overwrite = false;
992 	pr_debug2("fall back to non-overwrite mode\n");
993 	return 1;
994 }
995 
996 static int perf_top__start_counters(struct perf_top *top)
997 {
998 	char msg[BUFSIZ];
999 	struct evsel *counter;
1000 	struct evlist *evlist = top->evlist;
1001 	struct record_opts *opts = &top->record_opts;
1002 
1003 	if (perf_top__overwrite_check(top)) {
1004 		ui__error("perf top only supports a consistent per-event "
1005 			  "overwrite setting for all events\n");
1006 		goto out_err;
1007 	}
1008 
1009 	perf_evlist__config(evlist, opts, &callchain_param);
1010 
1011 	evlist__for_each_entry(evlist, counter) {
1012 try_again:
1013 		if (evsel__open(counter, top->evlist->core.cpus,
1014 				     top->evlist->core.threads) < 0) {
1015 
1016 			/*
1017 			 * Special-case the overwrite fallback:
1018 			 * perf top is the only tool that enables
1019 			 * overwrite mode by default, supports
1020 			 * both overwrite and non-overwrite modes, and
1021 			 * requires a consistent mode for all events.
1022 			 *
1023 			 * This may move to generic code once more
1024 			 * tools have a similar attribute.
1025 			 */
1026 			if (perf_missing_features.write_backward &&
1027 			    perf_top_overwrite_fallback(top, counter))
1028 				goto try_again;
1029 
1030 			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
1031 				if (verbose > 0)
1032 					ui__warning("%s\n", msg);
1033 				goto try_again;
1034 			}
1035 
1036 			perf_evsel__open_strerror(counter, &opts->target,
1037 						  errno, msg, sizeof(msg));
1038 			ui__error("%s\n", msg);
1039 			goto out_err;
1040 		}
1041 	}
1042 
1043 	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
1044 		ui__error("Failed to mmap with %d (%s)\n",
1045 			    errno, str_error_r(errno, msg, sizeof(msg)));
1046 		goto out_err;
1047 	}
1048 
1049 	return 0;
1050 
1051 out_err:
1052 	return -1;
1053 }
1054 
1055 static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1056 {
1057 	if (callchain->mode != CHAIN_NONE) {
1058 		if (callchain_register_param(callchain) < 0) {
1059 			ui__error("Can't register callchain params.\n");
1060 			return -EINVAL;
1061 		}
1062 	}
1063 
1064 	return 0;
1065 }
1066 
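/*
 * The two ordered_events queues form a double buffer: the reader thread
 * fills top->qe.in while the process thread flushes the other one.  Swap
 * them and return the queue that is now ready to be flushed.
 */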
1067 static struct ordered_events *rotate_queues(struct perf_top *top)
1068 {
1069 	struct ordered_events *in = top->qe.in;
1070 
1071 	if (top->qe.in == &top->qe.data[1])
1072 		top->qe.in = &top->qe.data[0];
1073 	else
1074 		top->qe.in = &top->qe.data[1];
1075 
1076 	return in;
1077 }
1078 
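/*
 * Flushing side of the double buffer: once the input queue has events,
 * swap the queues, then wait on the qe.rotate/qe.cond handshake for the
 * reader to acknowledge before flushing, so no new events land in the
 * queue being flushed.
 */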
1079 static void *process_thread(void *arg)
1080 {
1081 	struct perf_top *top = arg;
1082 
1083 	while (!done) {
1084 		struct ordered_events *out, *in = top->qe.in;
1085 
1086 		if (!in->nr_events) {
1087 			usleep(100);
1088 			continue;
1089 		}
1090 
1091 		out = rotate_queues(top);
1092 
1093 		pthread_mutex_lock(&top->qe.mutex);
1094 		top->qe.rotate = true;
1095 		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
1096 		pthread_mutex_unlock(&top->qe.mutex);
1097 
1098 		if (ordered_events__flush(out, OE_FLUSH__TOP))
1099 			pr_err("failed to process events\n");
1100 	}
1101 
1102 	return NULL;
1103 }
1104 
1105 /*
1106  * Allow only 'top->delay_secs' seconds behind samples.
1107  */
1108 static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1109 {
1110 	union perf_event *event = qevent->event;
1111 	u64 delay_timestamp;
1112 
1113 	if (event->header.type != PERF_RECORD_SAMPLE)
1114 		return false;
1115 
1116 	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1117 	return delay_timestamp < last_timestamp;
1118 }
1119 
1120 static int deliver_event(struct ordered_events *qe,
1121 			 struct ordered_event *qevent)
1122 {
1123 	struct perf_top *top = qe->data;
1124 	struct evlist *evlist = top->evlist;
1125 	struct perf_session *session = top->session;
1126 	union perf_event *event = qevent->event;
1127 	struct perf_sample sample;
1128 	struct evsel *evsel;
1129 	struct machine *machine;
1130 	int ret = -1;
1131 
1132 	if (should_drop(qevent, top)) {
1133 		top->drop++;
1134 		top->drop_total++;
1135 		return 0;
1136 	}
1137 
1138 	ret = perf_evlist__parse_sample(evlist, event, &sample);
1139 	if (ret) {
1140 		pr_err("Can't parse sample, err = %d\n", ret);
1141 		goto next_event;
1142 	}
1143 
1144 	evsel = perf_evlist__id2evsel(session->evlist, sample.id);
1145 	assert(evsel != NULL);
1146 
1147 	if (event->header.type == PERF_RECORD_SAMPLE) {
1148 		if (evswitch__discard(&top->evswitch, evsel))
1149 			return 0;
1150 		++top->samples;
1151 	}
1152 
1153 	switch (sample.cpumode) {
1154 	case PERF_RECORD_MISC_USER:
1155 		++top->us_samples;
1156 		if (top->hide_user_symbols)
1157 			goto next_event;
1158 		machine = &session->machines.host;
1159 		break;
1160 	case PERF_RECORD_MISC_KERNEL:
1161 		++top->kernel_samples;
1162 		if (top->hide_kernel_symbols)
1163 			goto next_event;
1164 		machine = &session->machines.host;
1165 		break;
1166 	case PERF_RECORD_MISC_GUEST_KERNEL:
1167 		++top->guest_kernel_samples;
1168 		machine = perf_session__find_machine(session,
1169 						     sample.pid);
1170 		break;
1171 	case PERF_RECORD_MISC_GUEST_USER:
1172 		++top->guest_us_samples;
1173 		/*
1174 		 * TODO: we don't process guest user samples from the
1175 		 * host side, except for simple counting.
1176 		 */
1177 		goto next_event;
1178 	default:
1179 		if (event->header.type == PERF_RECORD_SAMPLE)
1180 			goto next_event;
1181 		machine = &session->machines.host;
1182 		break;
1183 	}
1184 
1185 	if (event->header.type == PERF_RECORD_SAMPLE) {
1186 		perf_event__process_sample(&top->tool, event, evsel,
1187 					   &sample, machine);
1188 	} else if (event->header.type == PERF_RECORD_LOST) {
1189 		perf_top__process_lost(top, event, evsel);
1190 	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1191 		perf_top__process_lost_samples(top, event, evsel);
1192 	} else if (event->header.type < PERF_RECORD_MAX) {
1193 		hists__inc_nr_events(evsel__hists(evsel), event->header.type);
1194 		machine__process_event(machine, event, &sample);
1195 	} else
1196 		++session->evlist->stats.nr_unknown_events;
1197 
1198 	ret = 0;
1199 next_event:
1200 	return ret;
1201 }
1202 
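/*
 * Both queues deliver through deliver_event(); events are copied on
 * queue because the ring buffer slots they point into are recycled as
 * soon as perf_mmap__consume() is called.
 */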
1203 static void init_process_thread(struct perf_top *top)
1204 {
1205 	ordered_events__init(&top->qe.data[0], deliver_event, top);
1206 	ordered_events__init(&top->qe.data[1], deliver_event, top);
1207 	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1208 	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1209 	top->qe.in = &top->qe.data[0];
1210 	pthread_mutex_init(&top->qe.mutex, NULL);
1211 	pthread_cond_init(&top->qe.cond, NULL);
1212 }
1213 
1214 static int __cmd_top(struct perf_top *top)
1215 {
1216 	struct record_opts *opts = &top->record_opts;
1217 	pthread_t thread, thread_process;
1218 	int ret;
1219 
1220 	if (!top->annotation_opts.objdump_path) {
1221 		ret = perf_env__lookup_objdump(&top->session->header.env,
1222 					       &top->annotation_opts.objdump_path);
1223 		if (ret)
1224 			return ret;
1225 	}
1226 
1227 	ret = callchain_param__setup_sample_type(&callchain_param);
1228 	if (ret)
1229 		return ret;
1230 
1231 	if (perf_session__register_idle_thread(top->session) < 0)
1232 		return ret;
1233 
1234 	if (top->nr_threads_synthesize > 1)
1235 		perf_set_multithreaded();
1236 
1237 	init_process_thread(top);
1238 
1239 	if (opts->record_namespaces)
1240 		top->tool.namespace_events = true;
1241 
1242 	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
1243 						&top->session->machines.host,
1244 						&top->record_opts);
1245 	if (ret < 0)
1246 		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
1247 
1248 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
1249 				    top->evlist->core.threads, false,
1250 				    top->nr_threads_synthesize);
1251 
1252 	if (top->nr_threads_synthesize > 1)
1253 		perf_set_singlethreaded();
1254 
1255 	if (perf_hpp_list.socket) {
1256 		ret = perf_env__read_cpu_topology_map(&perf_env);
1257 		if (ret < 0) {
1258 			char errbuf[BUFSIZ];
1259 			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1260 
1261 			ui__error("Could not read the CPU topology map: %s\n", err);
1262 			return ret;
1263 		}
1264 	}
1265 
1266 	ret = perf_top__start_counters(top);
1267 	if (ret)
1268 		return ret;
1269 
1270 	top->session->evlist = top->evlist;
1271 	perf_session__set_id_hdr_size(top->session);
1272 
1273 	/*
1274 	 * When perf is starting the traced process, all the events (apart from
1275 	 * group members) have enable_on_exec=1 set, so don't spoil it by
1276 	 * prematurely enabling them.
1277 	 *
1278 	 * XXX 'top' still doesn't start workloads like record, trace, but should,
1279 	 * so leave the check here.
1280 	 */
1281 	if (!target__none(&opts->target))
1282 		evlist__enable(top->evlist);
1283 
1284 	ret = -1;
1285 	if (pthread_create(&thread_process, NULL, process_thread, top)) {
1286 		ui__error("Could not create process thread.\n");
1287 		return ret;
1288 	}
1289 
1290 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1291 							    display_thread), top)) {
1292 		ui__error("Could not create display thread.\n");
1293 		goto out_join_thread;
1294 	}
1295 
1296 	if (top->realtime_prio) {
1297 		struct sched_param param;
1298 
1299 		param.sched_priority = top->realtime_prio;
1300 		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1301 			ui__error("Could not set realtime priority.\n");
1302 			goto out_join;
1303 		}
1304 	}
1305 
1306 	/* Wait for a minimal set of events before starting the snapshot */
1307 	perf_evlist__poll(top->evlist, 100);
1308 
1309 	perf_top__mmap_read(top);
1310 
1311 	while (!done) {
1312 		u64 hits = top->samples;
1313 
1314 		perf_top__mmap_read(top);
1315 
1316 		if (opts->overwrite || (hits == top->samples))
1317 			ret = perf_evlist__poll(top->evlist, 100);
1318 
1319 		if (resize) {
1320 			perf_top__resize(top);
1321 			resize = 0;
1322 		}
1323 	}
1324 
1325 	ret = 0;
1326 out_join:
1327 	pthread_join(thread, NULL);
1328 out_join_thread:
1329 	pthread_cond_signal(&top->qe.cond);
1330 	pthread_join(thread_process, NULL);
1331 	return ret;
1332 }
1333 
1334 static int
1335 callchain_opt(const struct option *opt, const char *arg, int unset)
1336 {
1337 	symbol_conf.use_callchain = true;
1338 	return record_callchain_opt(opt, arg, unset);
1339 }
1340 
1341 static int
1342 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1343 {
1344 	struct callchain_param *callchain = opt->value;
1345 
1346 	callchain->enabled = !unset;
1347 	callchain->record_mode = CALLCHAIN_FP;
1348 
1349 	/*
1350 	 * --no-call-graph
1351 	 */
1352 	if (unset) {
1353 		symbol_conf.use_callchain = false;
1354 		callchain->record_mode = CALLCHAIN_NONE;
1355 		return 0;
1356 	}
1357 
1358 	return parse_callchain_top_opt(arg);
1359 }
1360 
1361 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
1362 {
1363 	if (!strcmp(var, "top.call-graph")) {
1364 		var = "call-graph.record-mode";
1365 		return perf_default_config(var, value, cb);
1366 	}
1367 	if (!strcmp(var, "top.children")) {
1368 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
1369 		return 0;
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 static int
1376 parse_percent_limit(const struct option *opt, const char *arg,
1377 		    int unset __maybe_unused)
1378 {
1379 	struct perf_top *top = opt->value;
1380 
1381 	top->min_percent = strtof(arg, NULL);
1382 	return 0;
1383 }
1384 
1385 const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1386 	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1387 
1388 int cmd_top(int argc, const char **argv)
1389 {
1390 	char errbuf[BUFSIZ];
1391 	struct perf_top top = {
1392 		.count_filter	     = 5,
1393 		.delay_secs	     = 2,
1394 		.record_opts = {
1395 			.mmap_pages	= UINT_MAX,
1396 			.user_freq	= UINT_MAX,
1397 			.user_interval	= ULLONG_MAX,
1398 			.freq		= 4000, /* 4 KHz */
1399 			.target		= {
1400 				.uses_mmap   = true,
1401 			},
1402 			/*
1403 			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
1404 			 * when we pause, fix that and reenable. Probably using a
1405 			 * separate evlist with a dummy event, i.e. a non-overwrite
1406 			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
1407 			 * stays in overwrite mode. -acme
1408 			 */
1409 			.overwrite	= 0,
1410 			.sample_time	= true,
1411 			.sample_time_set = true,
1412 		},
1413 		.max_stack	     = sysctl__max_stack(),
1414 		.annotation_opts     = annotation__default_options,
1415 		.nr_threads_synthesize = UINT_MAX,
1416 	};
1417 	struct record_opts *opts = &top.record_opts;
1418 	struct target *target = &opts->target;
1419 	const struct option options[] = {
1420 	OPT_CALLBACK('e', "event", &top.evlist, "event",
1421 		     "event selector. use 'perf list' to list available events",
1422 		     parse_events_option),
1423 	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
1424 	OPT_STRING('p', "pid", &target->pid, "pid",
1425 		    "profile events on existing process id"),
1426 	OPT_STRING('t', "tid", &target->tid, "tid",
1427 		    "profile events on existing thread id"),
1428 	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1429 			    "system-wide collection from all CPUs"),
1430 	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1431 		    "list of cpus to monitor"),
1432 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1433 		   "file", "vmlinux pathname"),
1434 	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1435 		    "don't load vmlinux even if found"),
1436 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1437 		   "file", "kallsyms pathname"),
1438 	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1439 		    "hide kernel symbols"),
1440 	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
1441 		     "number of mmap data pages",
1442 		     perf_evlist__parse_mmap_pages),
1443 	OPT_INTEGER('r', "realtime", &top.realtime_prio,
1444 		    "collect data with this RT SCHED_FIFO priority"),
1445 	OPT_INTEGER('d', "delay", &top.delay_secs,
1446 		    "number of seconds to delay between refreshes"),
1447 	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1448 			    "dump the symbol table used for profiling"),
1449 	OPT_INTEGER('f', "count-filter", &top.count_filter,
1450 		    "only display functions with more events than this"),
1451 	OPT_BOOLEAN(0, "group", &opts->group,
1452 			    "put the counters into a counter group"),
1453 	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
1454 		    "child tasks do not inherit counters"),
1455 	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1456 		    "symbol to annotate"),
1457 	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
1458 	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
1459 		     "profile at this frequency",
1460 		      record__parse_freq),
1461 	OPT_INTEGER('E', "entries", &top.print_entries,
1462 		    "display this many functions"),
1463 	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1464 		    "hide user symbols"),
1465 	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
1466 	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1467 	OPT_INCR('v', "verbose", &verbose,
1468 		    "be more verbose (show counter open errors, etc)"),
1469 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1470 		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
1471 		   " Please refer to the man page for the complete list."),
1472 	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
1473 		   "output field(s): overhead, period, sample plus all of sort keys"),
1474 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1475 		    "Show a column with the number of samples"),
1476 	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1477 			   NULL, "enables call-graph recording and display",
1478 			   &callchain_opt),
1479 	OPT_CALLBACK(0, "call-graph", &callchain_param,
1480 		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1481 		     top_callchain_help, &parse_callchain_opt),
1482 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1483 		    "Accumulate callchains of children and show total overhead as well"),
1484 	OPT_INTEGER(0, "max-stack", &top.max_stack,
1485 		    "Set the maximum stack depth when parsing the callchain. "
1486 		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1487 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1488 		   "ignore callees of these functions in call graphs",
1489 		   report_parse_ignore_callees_opt),
1490 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1491 		    "Show a column with the sum of periods"),
1492 	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1493 		   "only consider symbols in these dsos"),
1494 	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1495 		   "only consider symbols in these comms"),
1496 	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1497 		   "only consider these symbols"),
1498 	OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
1499 		    "Interleave source code with assembly code (default)"),
1500 	OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
1501 		    "Display raw encoding of assembly instructions (default)"),
1502 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1503 		    "Enable kernel symbol demangling"),
1504 	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
1505 	OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
1506 		    "objdump binary to use for disassembly and annotations"),
1507 	OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
1508 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
1509 	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1510 	OPT_CALLBACK(0, "percent-limit", &top, "percent",
1511 		     "Don't show entries under that percent", parse_percent_limit),
1512 	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1513 		     "How to display percentage of filtered entries", parse_filter_percentage),
1514 	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1515 		   "width[,width...]",
1516 		   "don't try to adjust column width, use these fixed values"),
1517 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1518 			"per thread proc mmap processing timeout in ms"),
1519 	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
1520 		     "branch any", "sample any taken branches",
1521 		     parse_branch_stack),
1522 	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
1523 		     "branch filter mask", "branch stack filter modes",
1524 		     parse_branch_stack),
1525 	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1526 		    "Show raw trace event output (do not use print fmt or plugins)"),
1527 	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1528 		    "Show entries in a hierarchy"),
1529 	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
1530 		    "Use a backward ring buffer, default: no"),
1531 	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1532 	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1533 			"number of threads to run event synthesize"),
1534 	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
1535 		    "Record namespaces events"),
1536 	OPTS_EVSWITCH(&top.evswitch),
1537 	OPT_END()
1538 	};
1539 	struct evlist *sb_evlist = NULL;
1540 	const char * const top_usage[] = {
1541 		"perf top [<options>]",
1542 		NULL
1543 	};
1544 	int status = hists__init();
1545 
1546 	if (status < 0)
1547 		return status;
1548 
1549 	top.annotation_opts.min_pcnt = 5;
1550 	top.annotation_opts.context  = 4;
1551 
1552 	top.evlist = evlist__new();
1553 	if (top.evlist == NULL)
1554 		return -ENOMEM;
1555 
1556 	status = perf_config(perf_top_config, &top);
1557 	if (status)
1558 		return status;
1559 
1560 	argc = parse_options(argc, argv, options, top_usage, 0);
1561 	if (argc)
1562 		usage_with_options(top_usage, options);
1563 
1564 	if (!top.evlist->core.nr_entries &&
1565 	    perf_evlist__add_default(top.evlist) < 0) {
1566 		pr_err("Not enough memory for event selector list\n");
1567 		goto out_delete_evlist;
1568 	}
1569 
1570 	status = evswitch__init(&top.evswitch, top.evlist, stderr);
1571 	if (status)
1572 		goto out_delete_evlist;
1573 
1574 	if (symbol_conf.report_hierarchy) {
1575 		/* disable incompatible options */
1576 		symbol_conf.event_group = false;
1577 		symbol_conf.cumulate_callchain = false;
1578 
1579 		if (field_order) {
1580 			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1581 			parse_options_usage(top_usage, options, "fields", 0);
1582 			parse_options_usage(NULL, options, "hierarchy", 0);
1583 			goto out_delete_evlist;
1584 		}
1585 	}
1586 
1587 	if (opts->branch_stack && callchain_param.enabled)
1588 		symbol_conf.show_branchflag_count = true;
1589 
1590 	sort__mode = SORT_MODE__TOP;
1591 	/* display thread wants entries to be collapsed in a different tree */
1592 	perf_hpp_list.need_collapse = 1;
1593 
1594 	if (top.use_stdio)
1595 		use_browser = 0;
1596 	else if (top.use_tui)
1597 		use_browser = 1;
1598 
1599 	setup_browser(false);
1600 
1601 	if (setup_sorting(top.evlist) < 0) {
1602 		if (sort_order)
1603 			parse_options_usage(top_usage, options, "s", 1);
1604 		if (field_order)
1605 			parse_options_usage(sort_order ? NULL : top_usage,
1606 					    options, "fields", 0);
1607 		goto out_delete_evlist;
1608 	}
1609 
1610 	status = target__validate(target);
1611 	if (status) {
1612 		target__strerror(target, status, errbuf, BUFSIZ);
1613 		ui__warning("%s\n", errbuf);
1614 	}
1615 
1616 	status = target__parse_uid(target);
1617 	if (status) {
1618 		int saved_errno = errno;
1619 
1620 		target__strerror(target, status, errbuf, BUFSIZ);
1621 		ui__error("%s\n", errbuf);
1622 
1623 		status = -saved_errno;
1624 		goto out_delete_evlist;
1625 	}
1626 
1627 	if (target__none(target))
1628 		target->system_wide = true;
1629 
1630 	if (perf_evlist__create_maps(top.evlist, target) < 0) {
1631 		ui__error("Couldn't create thread/CPU maps: %s\n",
1632 			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
1633 		goto out_delete_evlist;
1634 	}
1635 
1636 	if (top.delay_secs < 1)
1637 		top.delay_secs = 1;
1638 
1639 	if (record_opts__config(opts)) {
1640 		status = -EINVAL;
1641 		goto out_delete_evlist;
1642 	}
1643 
1644 	top.sym_evsel = perf_evlist__first(top.evlist);
1645 
1646 	if (!callchain_param.enabled) {
1647 		symbol_conf.cumulate_callchain = false;
1648 		perf_hpp__cancel_cumulate();
1649 	}
1650 
1651 	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1652 		callchain_param.order = ORDER_CALLER;
1653 
1654 	status = symbol__annotation_init();
1655 	if (status < 0)
1656 		goto out_delete_evlist;
1657 
1658 	annotation_config__init();
1659 
1660 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1661 	status = symbol__init(NULL);
1662 	if (status < 0)
1663 		goto out_delete_evlist;
1664 
1665 	sort__setup_elide(stdout);
1666 
1667 	get_term_dimensions(&top.winsize);
1668 	if (top.print_entries == 0) {
1669 		perf_top__update_print_entries(&top);
1670 		signal(SIGWINCH, winch_sig);
1671 	}
1672 
1673 	top.session = perf_session__new(NULL, false, NULL);
1674 	if (top.session == NULL) {
1675 		status = -1;
1676 		goto out_delete_evlist;
1677 	}
1678 
1679 	if (!top.record_opts.no_bpf_event)
1680 		bpf_event__add_sb_event(&sb_evlist, &perf_env);
1681 
1682 	if (perf_evlist__start_sb_thread(sb_evlist, target)) {
1683 		pr_debug("Couldn't start the BPF sideband thread:\nBPF programs starting from now on won't be annotatable\n");
1684 		opts->no_bpf_event = true;
1685 	}
1686 
1687 	status = __cmd_top(&top);
1688 
1689 	if (!opts->no_bpf_event)
1690 		perf_evlist__stop_sb_thread(sb_evlist);
1691 
1692 out_delete_evlist:
1693 	evlist__delete(top.evlist);
1694 	perf_session__delete(top.session);
1695 
1696 	return status;
1697 }
1698