xref: /openbmc/linux/tools/perf/builtin-top.c (revision 3a35093a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * builtin-top.c
4  *
5  * Builtin top command: Display a continuously updated profile of
6  * any workload, CPU or specific PID.
7  *
8  * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
9  *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Improvements and fixes by:
12  *
13  *   Arjan van de Ven <arjan@linux.intel.com>
14  *   Yanmin Zhang <yanmin.zhang@intel.com>
15  *   Wu Fengguang <fengguang.wu@intel.com>
16  *   Mike Galbraith <efault@gmx.de>
17  *   Paul Mackerras <paulus@samba.org>
18  */
19 #include "builtin.h"
20 
21 #include "perf.h"
22 
23 #include "util/annotate.h"
24 #include "util/bpf-event.h"
25 #include "util/config.h"
26 #include "util/color.h"
27 #include "util/dso.h"
28 #include "util/evlist.h"
29 #include "util/evsel.h"
30 #include "util/evsel_config.h"
31 #include "util/event.h"
32 #include "util/machine.h"
33 #include "util/map.h"
34 #include "util/mmap.h"
35 #include "util/session.h"
36 #include "util/thread.h"
37 #include "util/symbol.h"
38 #include "util/synthetic-events.h"
39 #include "util/top.h"
40 #include "util/util.h"
41 #include <linux/rbtree.h>
42 #include <subcmd/parse-options.h>
43 #include "util/parse-events.h"
44 #include "util/callchain.h"
45 #include "util/cpumap.h"
46 #include "util/sort.h"
47 #include "util/string2.h"
48 #include "util/term.h"
49 #include "util/intlist.h"
50 #include "util/parse-branch-options.h"
51 #include "arch/common.h"
52 #include "ui/ui.h"
53 
54 #include "util/debug.h"
55 #include "util/ordered-events.h"
56 #include "util/pfm.h"
57 
58 #include <assert.h>
59 #include <elf.h>
60 #include <fcntl.h>
61 
62 #include <stdio.h>
63 #include <termios.h>
64 #include <unistd.h>
65 #include <inttypes.h>
66 
67 #include <errno.h>
68 #include <time.h>
69 #include <sched.h>
70 #include <signal.h>
71 
72 #include <sys/syscall.h>
73 #include <sys/ioctl.h>
74 #include <poll.h>
75 #include <sys/prctl.h>
76 #include <sys/wait.h>
77 #include <sys/uio.h>
78 #include <sys/utsname.h>
79 #include <sys/mman.h>
80 
81 #include <linux/stringify.h>
82 #include <linux/time64.h>
83 #include <linux/types.h>
84 #include <linux/err.h>
85 
86 #include <linux/ctype.h>
87 #include <perf/mmap.h>
88 
89 static volatile int done;
90 static volatile int resize;
91 
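/* Rows reserved for the header above the symbol table when computing print_entries. */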
92 #define HEADER_LINE_NR  5
93 
94 static void perf_top__update_print_entries(struct perf_top *top)
95 {
96 	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
97 }
98 
99 static void winch_sig(int sig __maybe_unused)
100 {
101 	resize = 1;
102 }
103 
104 static void perf_top__resize(struct perf_top *top)
105 {
106 	get_term_dimensions(&top->winsize);
107 	perf_top__update_print_entries(top);
108 }
109 
110 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
111 {
112 	struct evsel *evsel;
113 	struct symbol *sym;
114 	struct annotation *notes;
115 	struct map *map;
116 	int err = -1;
117 
118 	if (!he || !he->ms.sym)
119 		return -1;
120 
121 	evsel = hists_to_evsel(he->hists);
122 
123 	sym = he->ms.sym;
124 	map = he->ms.map;
125 
126 	/*
127 	 * We can't annotate with just /proc/kallsyms
128 	 */
129 	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
130 	    !dso__is_kcore(map->dso)) {
131 		pr_err("Can't annotate %s: No vmlinux file was found in the "
132 		       "path\n", sym->name);
133 		sleep(1);
134 		return -1;
135 	}
136 
137 	notes = symbol__annotation(sym);
138 	pthread_mutex_lock(&notes->lock);
139 
140 	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
141 		pthread_mutex_unlock(&notes->lock);
142 		pr_err("Not enough memory for annotating '%s' symbol!\n",
143 		       sym->name);
144 		sleep(1);
145 		return err;
146 	}
147 
148 	err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
149 	if (err == 0) {
150 		top->sym_filter_entry = he;
151 	} else {
152 		char msg[BUFSIZ];
153 		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
154 		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
155 	}
156 
157 	pthread_mutex_unlock(&notes->lock);
158 	return err;
159 }
160 
161 static void __zero_source_counters(struct hist_entry *he)
162 {
163 	struct symbol *sym = he->ms.sym;
164 	symbol__annotate_zero_histograms(sym);
165 }
166 
167 static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
168 {
169 	struct utsname uts;
170 	int err = uname(&uts);
171 
172 	ui__warning("Out of bounds address found:\n\n"
173 		    "Addr:   %" PRIx64 "\n"
174 		    "DSO:    %s %c\n"
175 		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
176 		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
177 		    "Arch:   %s\n"
178 		    "Kernel: %s\n"
179 		    "Tools:  %s\n\n"
180 		    "Not all samples will be on the annotation output.\n\n"
181 		    "Please report to linux-kernel@vger.kernel.org\n",
182 		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
183 		    map->start, map->end, sym->start, sym->end,
184 		    sym->binding == STB_GLOBAL ? 'g' :
185 		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
186 		    err ? "[unknown]" : uts.machine,
187 		    err ? "[unknown]" : uts.release, perf_version_string);
188 	if (use_browser <= 0)
189 		sleep(5);
190 
191 	map->erange_warned = true;
192 }
193 
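/*
 * Account this sample's IP in the symbol's annotation histogram.  Uses a
 * trylock so the sampling path never blocks on the annotation lock.
 */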
194 static void perf_top__record_precise_ip(struct perf_top *top,
195 					struct hist_entry *he,
196 					struct perf_sample *sample,
197 					struct evsel *evsel, u64 ip)
198 {
199 	struct annotation *notes;
200 	struct symbol *sym = he->ms.sym;
201 	int err = 0;
202 
203 	if (sym == NULL || (use_browser == 0 &&
204 			    (top->sym_filter_entry == NULL ||
205 			     top->sym_filter_entry->ms.sym != sym)))
206 		return;
207 
208 	notes = symbol__annotation(sym);
209 
210 	if (pthread_mutex_trylock(&notes->lock))
211 		return;
212 
213 	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
214 
215 	pthread_mutex_unlock(&notes->lock);
216 
217 	if (unlikely(err)) {
218 		/*
219 		 * This function is now called with he->hists->lock held.
220 		 * Release it before going to sleep.
221 		 */
222 		pthread_mutex_unlock(&he->hists->lock);
223 
224 		if (err == -ERANGE && !he->ms.map->erange_warned)
225 			ui__warn_map_erange(he->ms.map, sym, ip);
226 		else if (err == -ENOMEM) {
227 			pr_err("Not enough memory for annotating '%s' symbol!\n",
228 			       sym->name);
229 			sleep(1);
230 		}
231 
232 		pthread_mutex_lock(&he->hists->lock);
233 	}
234 }
235 
236 static void perf_top__show_details(struct perf_top *top)
237 {
238 	struct hist_entry *he = top->sym_filter_entry;
239 	struct evsel *evsel;
240 	struct annotation *notes;
241 	struct symbol *symbol;
242 	int more;
243 
244 	if (!he)
245 		return;
246 
247 	evsel = hists_to_evsel(he->hists);
248 
249 	symbol = he->ms.sym;
250 	notes = symbol__annotation(symbol);
251 
252 	pthread_mutex_lock(&notes->lock);
253 
254 	symbol__calc_percent(symbol, evsel);
255 
256 	if (notes->src == NULL)
257 		goto out_unlock;
258 
259 	printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
260 	printf("  Events  Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
261 
262 	more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
263 
264 	if (top->evlist->enabled) {
265 		if (top->zero)
266 			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
267 		else
268 			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
269 	}
270 	if (more != 0)
271 		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
272 out_unlock:
273 	pthread_mutex_unlock(&notes->lock);
274 }
275 
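/*
 * Decay (or zero, when 'z' is toggled) the existing hist entries, then
 * re-collapse and re-sort them so the display reflects the latest samples.
 */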
276 static void perf_top__resort_hists(struct perf_top *t)
277 {
278 	struct evlist *evlist = t->evlist;
279 	struct evsel *pos;
280 
281 	evlist__for_each_entry(evlist, pos) {
282 		struct hists *hists = evsel__hists(pos);
283 
284 		/*
285 		 * Unlink existing entries so that they can be linked
286 		 * in the correct order in hists__match() below.
287 		 */
288 		hists__unlink(hists);
289 
290 		if (evlist->enabled) {
291 			if (t->zero) {
292 				hists__delete_entries(hists);
293 			} else {
294 				hists__decay_entries(hists, t->hide_user_symbols,
295 						     t->hide_kernel_symbols);
296 			}
297 		}
298 
299 		hists__collapse_resort(hists, NULL);
300 
301 		/* Non-group events are considered as leaders */
302 		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
303 			struct hists *leader_hists = evsel__hists(pos->leader);
304 
305 			hists__match(leader_hists, hists);
306 			hists__link(leader_hists, hists);
307 		}
308 	}
309 
310 	evlist__for_each_entry(evlist, pos) {
311 		evsel__output_resort(pos, NULL);
312 	}
313 }
314 
315 static void perf_top__print_sym_table(struct perf_top *top)
316 {
317 	char bf[160];
318 	int printed = 0;
319 	const int win_width = top->winsize.ws_col - 1;
320 	struct evsel *evsel = top->sym_evsel;
321 	struct hists *hists = evsel__hists(evsel);
322 
323 	puts(CONSOLE_CLEAR);
324 
325 	perf_top__header_snprintf(top, bf, sizeof(bf));
326 	printf("%s\n", bf);
327 
328 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
329 
330 	if (!top->record_opts.overwrite &&
331 	    (hists->stats.nr_lost_warned !=
332 	    hists->stats.nr_events[PERF_RECORD_LOST])) {
333 		hists->stats.nr_lost_warned =
334 			      hists->stats.nr_events[PERF_RECORD_LOST];
335 		color_fprintf(stdout, PERF_COLOR_RED,
336 			      "WARNING: LOST %d chunks, check IO/CPU overload",
337 			      hists->stats.nr_lost_warned);
338 		++printed;
339 	}
340 
341 	if (top->sym_filter_entry) {
342 		perf_top__show_details(top);
343 		return;
344 	}
345 
346 	perf_top__resort_hists(top);
347 
348 	hists__output_recalc_col_len(hists, top->print_entries - printed);
349 	putchar('\n');
350 	hists__fprintf(hists, false, top->print_entries - printed, win_width,
351 		       top->min_percent, stdout, !symbol_conf.use_callchain);
352 }
353 
354 static void prompt_integer(int *target, const char *msg)
355 {
356 	char *buf = malloc(0), *p;
357 	size_t dummy = 0;
358 	int tmp;
359 
360 	fprintf(stdout, "\n%s: ", msg);
361 	if (getline(&buf, &dummy, stdin) < 0)
362 		return;
363 
364 	p = strchr(buf, '\n');
365 	if (p)
366 		*p = 0;
367 
368 	p = buf;
369 	while(*p) {
370 		if (!isdigit(*p))
371 			goto out_free;
372 		p++;
373 	}
374 	tmp = strtoul(buf, NULL, 10);
375 	*target = tmp;
376 out_free:
377 	free(buf);
378 }
379 
380 static void prompt_percent(int *target, const char *msg)
381 {
382 	int tmp = 0;
383 
384 	prompt_integer(&tmp, msg);
385 	if (tmp >= 0 && tmp <= 100)
386 		*target = tmp;
387 }
388 
389 static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
390 {
391 	char *buf = malloc(0), *p;
392 	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
393 	struct hists *hists = evsel__hists(top->sym_evsel);
394 	struct rb_node *next;
395 	size_t dummy = 0;
396 
397 	/* zero counters of active symbol */
398 	if (syme) {
399 		__zero_source_counters(syme);
400 		top->sym_filter_entry = NULL;
401 	}
402 
403 	fprintf(stdout, "\n%s: ", msg);
404 	if (getline(&buf, &dummy, stdin) < 0)
405 		goto out_free;
406 
407 	p = strchr(buf, '\n');
408 	if (p)
409 		*p = 0;
410 
411 	next = rb_first_cached(&hists->entries);
412 	while (next) {
413 		n = rb_entry(next, struct hist_entry, rb_node);
414 		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
415 			found = n;
416 			break;
417 		}
418 		next = rb_next(&n->rb_node);
419 	}
420 
421 	if (!found) {
422 		fprintf(stderr, "Sorry, %s is not active.\n", buf);
423 		sleep(1);
424 	} else
425 		perf_top__parse_source(top, found);
426 
427 out_free:
428 	free(buf);
429 }
430 
431 static void perf_top__print_mapped_keys(struct perf_top *top)
432 {
433 	char *name = NULL;
434 
435 	if (top->sym_filter_entry) {
436 		struct symbol *sym = top->sym_filter_entry->ms.sym;
437 		name = sym->name;
438 	}
439 
440 	fprintf(stdout, "\nMapped keys:\n");
441 	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
442 	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
443 
444 	if (top->evlist->core.nr_entries > 1)
445 		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", evsel__name(top->sym_evsel));
446 
447 	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
448 
449 	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
450 	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
451 	fprintf(stdout, "\t[S]     stop annotation.\n");
452 
453 	fprintf(stdout,
454 		"\t[K]     hide kernel symbols.             \t(%s)\n",
455 		top->hide_kernel_symbols ? "yes" : "no");
456 	fprintf(stdout,
457 		"\t[U]     hide user symbols.               \t(%s)\n",
458 		top->hide_user_symbols ? "yes" : "no");
459 	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
460 	fprintf(stdout, "\t[qQ]    quit.\n");
461 }
462 
463 static int perf_top__key_mapped(struct perf_top *top, int c)
464 {
465 	switch (c) {
466 		case 'd':
467 		case 'e':
468 		case 'f':
469 		case 'z':
470 		case 'q':
471 		case 'Q':
472 		case 'K':
473 		case 'U':
474 		case 'F':
475 		case 's':
476 		case 'S':
477 			return 1;
478 		case 'E':
479 			return top->evlist->core.nr_entries > 1 ? 1 : 0;
480 		default:
481 			break;
482 	}
483 
484 	return 0;
485 }
486 
487 static bool perf_top__handle_keypress(struct perf_top *top, int c)
488 {
489 	bool ret = true;
490 
491 	if (!perf_top__key_mapped(top, c)) {
492 		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
493 		struct termios save;
494 
495 		perf_top__print_mapped_keys(top);
496 		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
497 		fflush(stdout);
498 
499 		set_term_quiet_input(&save);
500 
501 		poll(&stdin_poll, 1, -1);
502 		c = getc(stdin);
503 
504 		tcsetattr(0, TCSAFLUSH, &save);
505 		if (!perf_top__key_mapped(top, c))
506 			return ret;
507 	}
508 
509 	switch (c) {
510 		case 'd':
511 			prompt_integer(&top->delay_secs, "Enter display delay");
512 			if (top->delay_secs < 1)
513 				top->delay_secs = 1;
514 			break;
515 		case 'e':
516 			prompt_integer(&top->print_entries, "Enter display entries (lines)");
517 			if (top->print_entries == 0) {
518 				perf_top__resize(top);
519 				signal(SIGWINCH, winch_sig);
520 			} else {
521 				signal(SIGWINCH, SIG_DFL);
522 			}
523 			break;
524 		case 'E':
525 			if (top->evlist->core.nr_entries > 1) {
526 				/* Select 0 as the default event: */
527 				int counter = 0;
528 
529 				fprintf(stderr, "\nAvailable events:");
530 
531 				evlist__for_each_entry(top->evlist, top->sym_evsel)
532 					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel));
533 
534 				prompt_integer(&counter, "Enter details event counter");
535 
536 				if (counter >= top->evlist->core.nr_entries) {
537 					top->sym_evsel = evlist__first(top->evlist);
538 					fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
539 					sleep(1);
540 					break;
541 				}
542 				evlist__for_each_entry(top->evlist, top->sym_evsel)
543 					if (top->sym_evsel->idx == counter)
544 						break;
545 			} else
546 				top->sym_evsel = evlist__first(top->evlist);
547 			break;
548 		case 'f':
549 			prompt_integer(&top->count_filter, "Enter display event count filter");
550 			break;
551 		case 'F':
552 			prompt_percent(&top->annotation_opts.min_pcnt,
553 				       "Enter details display event filter (percent)");
554 			break;
555 		case 'K':
556 			top->hide_kernel_symbols = !top->hide_kernel_symbols;
557 			break;
558 		case 'q':
559 		case 'Q':
560 			printf("exiting.\n");
561 			if (top->dump_symtab)
562 				perf_session__fprintf_dsos(top->session, stderr);
563 			ret = false;
564 			break;
565 		case 's':
566 			perf_top__prompt_symbol(top, "Enter details symbol");
567 			break;
568 		case 'S':
569 			if (!top->sym_filter_entry)
570 				break;
571 			else {
572 				struct hist_entry *syme = top->sym_filter_entry;
573 
574 				top->sym_filter_entry = NULL;
575 				__zero_source_counters(syme);
576 			}
577 			break;
578 		case 'U':
579 			top->hide_user_symbols = !top->hide_user_symbols;
580 			break;
581 		case 'z':
582 			top->zero = !top->zero;
583 			break;
584 		default:
585 			break;
586 	}
587 
588 	return ret;
589 }
590 
591 static void perf_top__sort_new_samples(void *arg)
592 {
593 	struct perf_top *t = arg;
594 
595 	if (t->evlist->selected != NULL)
596 		t->sym_evsel = t->evlist->selected;
597 
598 	perf_top__resort_hists(t);
599 
600 	if (t->lost || t->drop)
601 		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
602 }
603 
604 static void stop_top(void)
605 {
606 	session_done = 1;
607 	done = 1;
608 }
609 
610 static void *display_thread_tui(void *arg)
611 {
612 	struct evsel *pos;
613 	struct perf_top *top = arg;
614 	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
615 	struct hist_browser_timer hbt = {
616 		.timer		= perf_top__sort_new_samples,
617 		.arg		= top,
618 		.refresh	= top->delay_secs,
619 	};
620 	int ret;
621 
622 	/* In order to read symbols from other namespaces perf needs to call
623 	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
624 	 * unshare(2) the fs so that we may continue to setns into namespaces
625 	 * that we're observing.
626 	 */
627 	unshare(CLONE_FS);
628 
629 	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
630 
631 repeat:
632 	perf_top__sort_new_samples(top);
633 
634 	/*
635 	 * Initialize the uid_filter_str; in the future the TUI will allow
636 	 * zooming in/out on UIDs. For now just use whatever the user passed
637 	 * via --uid.
638 	 */
639 	evlist__for_each_entry(top->evlist, pos) {
640 		struct hists *hists = evsel__hists(pos);
641 		hists->uid_filter_str = top->record_opts.target.uid_str;
642 	}
643 
644 	ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
645 				       &top->session->header.env, !top->record_opts.overwrite,
646 				       &top->annotation_opts);
647 	if (ret == K_RELOAD) {
648 		top->zero = true;
649 		goto repeat;
650 	} else
651 		stop_top();
652 
653 	return NULL;
654 }
655 
656 static void display_sig(int sig __maybe_unused)
657 {
658 	stop_top();
659 }
660 
661 static void display_setup_sig(void)
662 {
663 	signal(SIGSEGV, sighandler_dump_stack);
664 	signal(SIGFPE, sighandler_dump_stack);
665 	signal(SIGINT,  display_sig);
666 	signal(SIGQUIT, display_sig);
667 	signal(SIGTERM, display_sig);
668 }
669 
670 static void *display_thread(void *arg)
671 {
672 	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
673 	struct termios save;
674 	struct perf_top *top = arg;
675 	int delay_msecs, c;
676 
677 	/* In order to read symbols from other namespaces perf needs to call
678 	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
679 	 * unshare(2) the fs so that we may continue to setns into namespaces
680 	 * that we're observing.
681 	 */
682 	unshare(CLONE_FS);
683 
684 	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
685 
686 	display_setup_sig();
687 	pthread__unblock_sigwinch();
688 repeat:
689 	delay_msecs = top->delay_secs * MSEC_PER_SEC;
690 	set_term_quiet_input(&save);
691 	/* trash a pending return/keypress, if any */
692 	clearerr(stdin);
693 	if (poll(&stdin_poll, 1, 0) > 0)
694 		getc(stdin);
695 
696 	while (!done) {
697 		perf_top__print_sym_table(top);
698 		/*
699 		 * Either timeout expired or we got an EINTR due to SIGWINCH,
700 		 * refresh screen in both cases.
701 		 */
702 		switch (poll(&stdin_poll, 1, delay_msecs)) {
703 		case 0:
704 			continue;
705 		case -1:
706 			if (errno == EINTR)
707 				continue;
708 			__fallthrough;
709 		default:
710 			c = getc(stdin);
711 			tcsetattr(0, TCSAFLUSH, &save);
712 
713 			if (perf_top__handle_keypress(top, c))
714 				goto repeat;
715 			stop_top();
716 		}
717 	}
718 
719 	tcsetattr(0, TCSAFLUSH, &save);
720 	return NULL;
721 }
722 
723 static int hist_iter__top_callback(struct hist_entry_iter *iter,
724 				   struct addr_location *al, bool single,
725 				   void *arg)
726 {
727 	struct perf_top *top = arg;
728 	struct hist_entry *he = iter->he;
729 	struct evsel *evsel = iter->evsel;
730 
731 	if (perf_hpp_list.sym && single)
732 		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
733 
734 	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
735 		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
736 		     NULL);
737 	return 0;
738 }
739 
740 static void perf_event__process_sample(struct perf_tool *tool,
741 				       const union perf_event *event,
742 				       struct evsel *evsel,
743 				       struct perf_sample *sample,
744 				       struct machine *machine)
745 {
746 	struct perf_top *top = container_of(tool, struct perf_top, tool);
747 	struct addr_location al;
748 	int err;
749 
750 	if (!machine && perf_guest) {
751 		static struct intlist *seen;
752 
753 		if (!seen)
754 			seen = intlist__new(NULL);
755 
756 		if (!intlist__has_entry(seen, sample->pid)) {
757 			pr_err("Can't find guest [%d]'s kernel information\n",
758 				sample->pid);
759 			intlist__add(seen, sample->pid);
760 		}
761 		return;
762 	}
763 
764 	if (!machine) {
765 		pr_err("%u unprocessable samples recorded.\r",
766 		       top->session->evlist->stats.nr_unprocessable_samples++);
767 		return;
768 	}
769 
770 	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
771 		top->exact_samples++;
772 
773 	if (machine__resolve(machine, &al, sample) < 0)
774 		return;
775 
776 	if (top->stitch_lbr)
777 		al.thread->lbr_stitch_enable = true;
778 
779 	if (!machine->kptr_restrict_warned &&
780 	    symbol_conf.kptr_restrict &&
781 	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
782 		if (!evlist__exclude_kernel(top->session->evlist)) {
783 			ui__warning(
784 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
785 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
786 "Kernel%s samples will not be resolved.\n",
787 			  al.map && map__has_symbols(al.map) ?
788 			  " modules" : "");
789 			if (use_browser <= 0)
790 				sleep(5);
791 		}
792 		machine->kptr_restrict_warned = true;
793 	}
794 
795 	if (al.sym == NULL && al.map != NULL) {
796 		const char *msg = "Kernel samples will not be resolved.\n";
797 		/*
798 		 * As we do lazy loading of symtabs we only will know if the
799 		 * specified vmlinux file is invalid when we actually have a
800 		 * hit in kernel space and then try to load it. So if we get
801 		 * here and there are _no_ symbols in the DSO backing the
802 		 * kernel map, bail out.
803 		 *
804 		 * We may never get here, for instance, if we use -K/
805 		 * --hide-kernel-symbols, even if the user specifies an
806 		 * invalid --vmlinux ;-)
807 		 */
808 		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
809 		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
810 			if (symbol_conf.vmlinux_name) {
811 				char serr[256];
812 				dso__strerror_load(al.map->dso, serr, sizeof(serr));
813 				ui__warning("The %s file can't be used: %s\n%s",
814 					    symbol_conf.vmlinux_name, serr, msg);
815 			} else {
816 				ui__warning("A vmlinux file was not found.\n%s",
817 					    msg);
818 			}
819 
820 			if (use_browser <= 0)
821 				sleep(5);
822 			top->vmlinux_warned = true;
823 		}
824 	}
825 
826 	if (al.sym == NULL || !al.sym->idle) {
827 		struct hists *hists = evsel__hists(evsel);
828 		struct hist_entry_iter iter = {
829 			.evsel		= evsel,
830 			.sample 	= sample,
831 			.add_entry_cb 	= hist_iter__top_callback,
832 		};
833 
834 		if (symbol_conf.cumulate_callchain)
835 			iter.ops = &hist_iter_cumulative;
836 		else
837 			iter.ops = &hist_iter_normal;
838 
839 		pthread_mutex_lock(&hists->lock);
840 
841 		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
842 		if (err < 0)
843 			pr_err("Problem incrementing symbol period, skipping event\n");
844 
845 		pthread_mutex_unlock(&hists->lock);
846 	}
847 
848 	addr_location__put(&al);
849 }
850 
851 static void
852 perf_top__process_lost(struct perf_top *top, union perf_event *event,
853 		       struct evsel *evsel)
854 {
855 	struct hists *hists = evsel__hists(evsel);
856 
857 	top->lost += event->lost.lost;
858 	top->lost_total += event->lost.lost;
859 	hists->stats.total_lost += event->lost.lost;
860 }
861 
862 static void
863 perf_top__process_lost_samples(struct perf_top *top,
864 			       union perf_event *event,
865 			       struct evsel *evsel)
866 {
867 	struct hists *hists = evsel__hists(evsel);
868 
869 	top->lost += event->lost_samples.lost;
870 	top->lost_total += event->lost_samples.lost;
871 	hists->stats.total_lost_samples += event->lost_samples.lost;
872 }
873 
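/*
 * Timestamp of the most recently queued event, updated by the mmap reader
 * and compared against in should_drop() below.
 */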
874 static u64 last_timestamp;
875 
876 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
877 {
878 	struct record_opts *opts = &top->record_opts;
879 	struct evlist *evlist = top->evlist;
880 	struct mmap *md;
881 	union perf_event *event;
882 
883 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
884 	if (perf_mmap__read_init(&md->core) < 0)
885 		return;
886 
887 	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
888 		int ret;
889 
890 		ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
891 		if (ret && ret != -1)
892 			break;
893 
894 		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
895 		if (ret)
896 			break;
897 
898 		perf_mmap__consume(&md->core);
899 
900 		if (top->qe.rotate) {
901 			pthread_mutex_lock(&top->qe.mutex);
902 			top->qe.rotate = false;
903 			pthread_cond_signal(&top->qe.cond);
904 			pthread_mutex_unlock(&top->qe.mutex);
905 		}
906 	}
907 
908 	perf_mmap__read_done(&md->core);
909 }
910 
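/*
 * Read all mmap rings.  In overwrite mode the backward ring buffers are
 * paused (DATA_PENDING) while being read and set back to RUNNING afterwards.
 */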
911 static void perf_top__mmap_read(struct perf_top *top)
912 {
913 	bool overwrite = top->record_opts.overwrite;
914 	struct evlist *evlist = top->evlist;
915 	int i;
916 
917 	if (overwrite)
918 		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
919 
920 	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
921 		perf_top__mmap_read_idx(top, i);
922 
923 	if (overwrite) {
924 		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
925 		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
926 	}
927 }
928 
929 /*
930  * Check the per-event overwrite term.
931  * perf top requires a consistent term for all events.
932  * - No event has a per-event term.
933  *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
934  *   Nothing changes, return 0.
935  * - All events have the same per-event term.
936  *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
937  *   Use the per-event setting to replace opts->overwrite if
938  *   they differ, then return 0.
939  * - Events have different per-event terms.
940  *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
941  *   Return -1.
942  * - Some of the events set a per-event term, but some do not.
943  *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
944  *   Return -1.
945  */
946 static int perf_top__overwrite_check(struct perf_top *top)
947 {
948 	struct record_opts *opts = &top->record_opts;
949 	struct evlist *evlist = top->evlist;
950 	struct evsel_config_term *term;
951 	struct list_head *config_terms;
952 	struct evsel *evsel;
953 	int set, overwrite = -1;
954 
955 	evlist__for_each_entry(evlist, evsel) {
956 		set = -1;
957 		config_terms = &evsel->config_terms;
958 		list_for_each_entry(term, config_terms, list) {
959 			if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
960 				set = term->val.overwrite ? 1 : 0;
961 		}
962 
963 		/* no term for the current or any previous event (likely) */
964 		if ((overwrite < 0) && (set < 0))
965 			continue;
966 
967 		/* has term for both current and previous event, compare */
968 		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
969 			return -1;
970 
971 		/* no term for current event but has term for previous one */
972 		if ((overwrite >= 0) && (set < 0))
973 			return -1;
974 
975 		/* has term for current event */
976 		if ((overwrite < 0) && (set >= 0)) {
977 			/* if it's first event, set overwrite */
978 			if (evsel == evlist__first(evlist))
979 				overwrite = set;
980 			else
981 				return -1;
982 		}
983 	}
984 
985 	if ((overwrite >= 0) && (opts->overwrite != overwrite))
986 		opts->overwrite = overwrite;
987 
988 	return 0;
989 }
990 
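/*
 * Fall back to non-overwrite mode for the whole evlist when the first event
 * fails to open (see the write_backward check at the call site).
 */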
991 static int perf_top_overwrite_fallback(struct perf_top *top,
992 				       struct evsel *evsel)
993 {
994 	struct record_opts *opts = &top->record_opts;
995 	struct evlist *evlist = top->evlist;
996 	struct evsel *counter;
997 
998 	if (!opts->overwrite)
999 		return 0;
1000 
1001 	/* only fall back when first event fails */
1002 	if (evsel != evlist__first(evlist))
1003 		return 0;
1004 
1005 	evlist__for_each_entry(evlist, counter)
1006 		counter->core.attr.write_backward = false;
1007 	opts->overwrite = false;
1008 	pr_debug2("fall back to non-overwrite mode\n");
1009 	return 1;
1010 }
1011 
1012 static int perf_top__start_counters(struct perf_top *top)
1013 {
1014 	char msg[BUFSIZ];
1015 	struct evsel *counter;
1016 	struct evlist *evlist = top->evlist;
1017 	struct record_opts *opts = &top->record_opts;
1018 
1019 	if (perf_top__overwrite_check(top)) {
1020 		ui__error("perf top only supports a consistent per-event "
1021 			  "overwrite setting for all events\n");
1022 		goto out_err;
1023 	}
1024 
1025 	evlist__config(evlist, opts, &callchain_param);
1026 
1027 	evlist__for_each_entry(evlist, counter) {
1028 try_again:
1029 		if (evsel__open(counter, top->evlist->core.cpus,
1030 				     top->evlist->core.threads) < 0) {
1031 
1032 			/*
1033 			 * Specially handle the overwrite fallback.
1034 			 * perf top is the only tool which has
1035 			 * overwrite mode enabled by default, supports
1036 			 * both overwrite and non-overwrite modes, and
1037 			 * requires a consistent mode for all events.
1038 			 *
1039 			 * This may move to generic code once more tools
1040 			 * have a similar attribute.
1041 			 */
1042 			if (perf_missing_features.write_backward &&
1043 			    perf_top_overwrite_fallback(top, counter))
1044 				goto try_again;
1045 
1046 			if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
1047 				if (verbose > 0)
1048 					ui__warning("%s\n", msg);
1049 				goto try_again;
1050 			}
1051 
1052 			evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
1053 			ui__error("%s\n", msg);
1054 			goto out_err;
1055 		}
1056 	}
1057 
1058 	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
1059 		ui__error("Failed to mmap with %d (%s)\n",
1060 			    errno, str_error_r(errno, msg, sizeof(msg)));
1061 		goto out_err;
1062 	}
1063 
1064 	return 0;
1065 
1066 out_err:
1067 	return -1;
1068 }
1069 
1070 static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1071 {
1072 	if (callchain->mode != CHAIN_NONE) {
1073 		if (callchain_register_param(callchain) < 0) {
1074 			ui__error("Can't register callchain params.\n");
1075 			return -EINVAL;
1076 		}
1077 	}
1078 
1079 	return 0;
1080 }
1081 
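/*
 * Swap which ordered_events queue the mmap reader fills (top->qe.in) and
 * return the retired one so that process_thread() can flush it.
 */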
1082 static struct ordered_events *rotate_queues(struct perf_top *top)
1083 {
1084 	struct ordered_events *in = top->qe.in;
1085 
1086 	if (top->qe.in == &top->qe.data[1])
1087 		top->qe.in = &top->qe.data[0];
1088 	else
1089 		top->qe.in = &top->qe.data[1];
1090 
1091 	return in;
1092 }
1093 
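/*
 * Drain loop: once the current 'in' queue has events, rotate the queues,
 * wait for the reader to acknowledge via qe.rotate/qe.cond, then flush the
 * retired queue through deliver_event().
 */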
1094 static void *process_thread(void *arg)
1095 {
1096 	struct perf_top *top = arg;
1097 
1098 	while (!done) {
1099 		struct ordered_events *out, *in = top->qe.in;
1100 
1101 		if (!in->nr_events) {
1102 			usleep(100);
1103 			continue;
1104 		}
1105 
1106 		out = rotate_queues(top);
1107 
1108 		pthread_mutex_lock(&top->qe.mutex);
1109 		top->qe.rotate = true;
1110 		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
1111 		pthread_mutex_unlock(&top->qe.mutex);
1112 
1113 		if (ordered_events__flush(out, OE_FLUSH__TOP))
1114 			pr_err("failed to process events\n");
1115 	}
1116 
1117 	return NULL;
1118 }
1119 
1120 /*
1121  * Allow only 'top->delay_secs' seconds behind samples.
1122  */
1123 static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1124 {
1125 	union perf_event *event = qevent->event;
1126 	u64 delay_timestamp;
1127 
1128 	if (event->header.type != PERF_RECORD_SAMPLE)
1129 		return false;
1130 
1131 	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1132 	return delay_timestamp < last_timestamp;
1133 }
1134 
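/*
 * Ordered-events delivery callback: parse the sample, pick the machine from
 * the sample cpumode and dispatch PERF_RECORD_SAMPLE, LOST and other records.
 */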
1135 static int deliver_event(struct ordered_events *qe,
1136 			 struct ordered_event *qevent)
1137 {
1138 	struct perf_top *top = qe->data;
1139 	struct evlist *evlist = top->evlist;
1140 	struct perf_session *session = top->session;
1141 	union perf_event *event = qevent->event;
1142 	struct perf_sample sample;
1143 	struct evsel *evsel;
1144 	struct machine *machine;
1145 	int ret = -1;
1146 
1147 	if (should_drop(qevent, top)) {
1148 		top->drop++;
1149 		top->drop_total++;
1150 		return 0;
1151 	}
1152 
1153 	ret = evlist__parse_sample(evlist, event, &sample);
1154 	if (ret) {
1155 		pr_err("Can't parse sample, err = %d\n", ret);
1156 		goto next_event;
1157 	}
1158 
1159 	evsel = evlist__id2evsel(session->evlist, sample.id);
1160 	assert(evsel != NULL);
1161 
1162 	if (event->header.type == PERF_RECORD_SAMPLE) {
1163 		if (evswitch__discard(&top->evswitch, evsel))
1164 			return 0;
1165 		++top->samples;
1166 	}
1167 
1168 	switch (sample.cpumode) {
1169 	case PERF_RECORD_MISC_USER:
1170 		++top->us_samples;
1171 		if (top->hide_user_symbols)
1172 			goto next_event;
1173 		machine = &session->machines.host;
1174 		break;
1175 	case PERF_RECORD_MISC_KERNEL:
1176 		++top->kernel_samples;
1177 		if (top->hide_kernel_symbols)
1178 			goto next_event;
1179 		machine = &session->machines.host;
1180 		break;
1181 	case PERF_RECORD_MISC_GUEST_KERNEL:
1182 		++top->guest_kernel_samples;
1183 		machine = perf_session__find_machine(session,
1184 						     sample.pid);
1185 		break;
1186 	case PERF_RECORD_MISC_GUEST_USER:
1187 		++top->guest_us_samples;
1188 		/*
1189 		 * TODO: we don't process guest user samples from the host
1190 		 * side, except for simple counting.
1191 		 */
1192 		goto next_event;
1193 	default:
1194 		if (event->header.type == PERF_RECORD_SAMPLE)
1195 			goto next_event;
1196 		machine = &session->machines.host;
1197 		break;
1198 	}
1199 
1200 	if (event->header.type == PERF_RECORD_SAMPLE) {
1201 		perf_event__process_sample(&top->tool, event, evsel,
1202 					   &sample, machine);
1203 	} else if (event->header.type == PERF_RECORD_LOST) {
1204 		perf_top__process_lost(top, event, evsel);
1205 	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1206 		perf_top__process_lost_samples(top, event, evsel);
1207 	} else if (event->header.type < PERF_RECORD_MAX) {
1208 		hists__inc_nr_events(evsel__hists(evsel), event->header.type);
1209 		machine__process_event(machine, event, &sample);
1210 	} else
1211 		++session->evlist->stats.nr_unknown_events;
1212 
1213 	ret = 0;
1214 next_event:
1215 	return ret;
1216 }
1217 
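/*
 * Set up the two ordered_events queues, double-buffered between the mmap
 * reader and process_thread(), with deliver_event() as the delivery callback.
 */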
1218 static void init_process_thread(struct perf_top *top)
1219 {
1220 	ordered_events__init(&top->qe.data[0], deliver_event, top);
1221 	ordered_events__init(&top->qe.data[1], deliver_event, top);
1222 	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1223 	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1224 	top->qe.in = &top->qe.data[0];
1225 	pthread_mutex_init(&top->qe.mutex, NULL);
1226 	pthread_cond_init(&top->qe.cond, NULL);
1227 }
1228 
1229 static int __cmd_top(struct perf_top *top)
1230 {
1231 	struct record_opts *opts = &top->record_opts;
1232 	pthread_t thread, thread_process;
1233 	int ret;
1234 
1235 	if (!top->annotation_opts.objdump_path) {
1236 		ret = perf_env__lookup_objdump(&top->session->header.env,
1237 					       &top->annotation_opts.objdump_path);
1238 		if (ret)
1239 			return ret;
1240 	}
1241 
1242 	ret = callchain_param__setup_sample_type(&callchain_param);
1243 	if (ret)
1244 		return ret;
1245 
1246 	if (perf_session__register_idle_thread(top->session) < 0)
1247 		return ret;
1248 
1249 	if (top->nr_threads_synthesize > 1)
1250 		perf_set_multithreaded();
1251 
1252 	init_process_thread(top);
1253 
1254 	if (opts->record_namespaces)
1255 		top->tool.namespace_events = true;
1256 	if (opts->record_cgroup) {
1257 #ifdef HAVE_FILE_HANDLE
1258 		top->tool.cgroup_events = true;
1259 #else
1260 		pr_err("cgroup tracking is not supported.\n");
1261 		return -1;
1262 #endif
1263 	}
1264 
1265 	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
1266 						&top->session->machines.host,
1267 						&top->record_opts);
1268 	if (ret < 0)
1269 		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
1270 
1271 	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
1272 					     &top->session->machines.host);
1273 	if (ret < 0)
1274 		pr_debug("Couldn't synthesize cgroup events.\n");
1275 
1276 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
1277 				    top->evlist->core.threads, false,
1278 				    top->nr_threads_synthesize);
1279 
1280 	if (top->nr_threads_synthesize > 1)
1281 		perf_set_singlethreaded();
1282 
1283 	if (perf_hpp_list.socket) {
1284 		ret = perf_env__read_cpu_topology_map(&perf_env);
1285 		if (ret < 0) {
1286 			char errbuf[BUFSIZ];
1287 			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1288 
1289 			ui__error("Could not read the CPU topology map: %s\n", err);
1290 			return ret;
1291 		}
1292 	}
1293 
1294 	ret = perf_top__start_counters(top);
1295 	if (ret)
1296 		return ret;
1297 
1298 	top->session->evlist = top->evlist;
1299 	perf_session__set_id_hdr_size(top->session);
1300 
1301 	/*
1302 	 * When perf is starting the traced process, all the events (apart from
1303 	 * group members) have enable_on_exec=1 set, so don't spoil it by
1304 	 * prematurely enabling them.
1305 	 *
1306 	 * XXX 'top' still doesn't start workloads like record, trace, but should,
1307 	 * so leave the check here.
1308 	 */
1309 	if (!target__none(&opts->target))
1310 		evlist__enable(top->evlist);
1311 
1312 	ret = -1;
1313 	if (pthread_create(&thread_process, NULL, process_thread, top)) {
1314 		ui__error("Could not create process thread.\n");
1315 		return ret;
1316 	}
1317 
1318 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1319 							    display_thread), top)) {
1320 		ui__error("Could not create display thread.\n");
1321 		goto out_join_thread;
1322 	}
1323 
1324 	if (top->realtime_prio) {
1325 		struct sched_param param;
1326 
1327 		param.sched_priority = top->realtime_prio;
1328 		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1329 			ui__error("Could not set realtime priority.\n");
1330 			goto out_join;
1331 		}
1332 	}
1333 
1334 	/* Wait for a minimal set of events before starting the snapshot */
1335 	evlist__poll(top->evlist, 100);
1336 
1337 	perf_top__mmap_read(top);
1338 
1339 	while (!done) {
1340 		u64 hits = top->samples;
1341 
1342 		perf_top__mmap_read(top);
1343 
1344 		if (opts->overwrite || (hits == top->samples))
1345 			ret = evlist__poll(top->evlist, 100);
1346 
1347 		if (resize) {
1348 			perf_top__resize(top);
1349 			resize = 0;
1350 		}
1351 	}
1352 
1353 	ret = 0;
1354 out_join:
1355 	pthread_join(thread, NULL);
1356 out_join_thread:
1357 	pthread_cond_signal(&top->qe.cond);
1358 	pthread_join(thread_process, NULL);
1359 	return ret;
1360 }
1361 
1362 static int
1363 callchain_opt(const struct option *opt, const char *arg, int unset)
1364 {
1365 	symbol_conf.use_callchain = true;
1366 	return record_callchain_opt(opt, arg, unset);
1367 }
1368 
1369 static int
1370 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1371 {
1372 	struct callchain_param *callchain = opt->value;
1373 
1374 	callchain->enabled = !unset;
1375 	callchain->record_mode = CALLCHAIN_FP;
1376 
1377 	/*
1378 	 * --no-call-graph
1379 	 */
1380 	if (unset) {
1381 		symbol_conf.use_callchain = false;
1382 		callchain->record_mode = CALLCHAIN_NONE;
1383 		return 0;
1384 	}
1385 
1386 	return parse_callchain_top_opt(arg);
1387 }
1388 
1389 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
1390 {
1391 	if (!strcmp(var, "top.call-graph")) {
1392 		var = "call-graph.record-mode";
1393 		return perf_default_config(var, value, cb);
1394 	}
1395 	if (!strcmp(var, "top.children")) {
1396 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
1397 		return 0;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 static int
1404 parse_percent_limit(const struct option *opt, const char *arg,
1405 		    int unset __maybe_unused)
1406 {
1407 	struct perf_top *top = opt->value;
1408 
1409 	top->min_percent = strtof(arg, NULL);
1410 	return 0;
1411 }
1412 
1413 const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1414 	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1415 
1416 int cmd_top(int argc, const char **argv)
1417 {
1418 	char errbuf[BUFSIZ];
1419 	struct perf_top top = {
1420 		.count_filter	     = 5,
1421 		.delay_secs	     = 2,
1422 		.record_opts = {
1423 			.mmap_pages	= UINT_MAX,
1424 			.user_freq	= UINT_MAX,
1425 			.user_interval	= ULLONG_MAX,
1426 			.freq		= 4000, /* 4 KHz */
1427 			.target		= {
1428 				.uses_mmap   = true,
1429 			},
1430 			/*
1431 			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
1432 			 * when we pause, fix that and reenable. Probably using a
1433 			 * separate evlist with a dummy event, i.e. a non-overwrite
1434 			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
1435 			 * stays in overwrite mode. -acme
1436 			 */
1437 			.overwrite	= 0,
1438 			.sample_time	= true,
1439 			.sample_time_set = true,
1440 		},
1441 		.max_stack	     = sysctl__max_stack(),
1442 		.annotation_opts     = annotation__default_options,
1443 		.nr_threads_synthesize = UINT_MAX,
1444 	};
1445 	struct record_opts *opts = &top.record_opts;
1446 	struct target *target = &opts->target;
1447 	const struct option options[] = {
1448 	OPT_CALLBACK('e', "event", &top.evlist, "event",
1449 		     "event selector. use 'perf list' to list available events",
1450 		     parse_events_option),
1451 	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
1452 	OPT_STRING('p', "pid", &target->pid, "pid",
1453 		    "profile events on existing process id"),
1454 	OPT_STRING('t', "tid", &target->tid, "tid",
1455 		    "profile events on existing thread id"),
1456 	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1457 			    "system-wide collection from all CPUs"),
1458 	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1459 		    "list of cpus to monitor"),
1460 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1461 		   "file", "vmlinux pathname"),
1462 	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1463 		    "don't load vmlinux even if found"),
1464 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1465 		   "file", "kallsyms pathname"),
1466 	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1467 		    "hide kernel symbols"),
1468 	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
1469 		     "number of mmap data pages", evlist__parse_mmap_pages),
1470 	OPT_INTEGER('r', "realtime", &top.realtime_prio,
1471 		    "collect data with this RT SCHED_FIFO priority"),
1472 	OPT_INTEGER('d', "delay", &top.delay_secs,
1473 		    "number of seconds to delay between refreshes"),
1474 	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1475 			    "dump the symbol table used for profiling"),
1476 	OPT_INTEGER('f', "count-filter", &top.count_filter,
1477 		    "only display functions with more events than this"),
1478 	OPT_BOOLEAN(0, "group", &opts->group,
1479 			    "put the counters into a counter group"),
1480 	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
1481 		    "child tasks do not inherit counters"),
1482 	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1483 		    "symbol to annotate"),
1484 	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
1485 	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
1486 		     "profile at this frequency",
1487 		      record__parse_freq),
1488 	OPT_INTEGER('E', "entries", &top.print_entries,
1489 		    "display this many functions"),
1490 	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1491 		    "hide user symbols"),
1492 	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
1493 	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1494 	OPT_INCR('v', "verbose", &verbose,
1495 		    "be more verbose (show counter open errors, etc)"),
1496 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1497 		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
1498 		   " Please refer the man page for the complete list."),
1499 	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
1500 		   "output field(s): overhead, period, sample plus all of sort keys"),
1501 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1502 		    "Show a column with the number of samples"),
1503 	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1504 			   NULL, "enables call-graph recording and display",
1505 			   &callchain_opt),
1506 	OPT_CALLBACK(0, "call-graph", &callchain_param,
1507 		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1508 		     top_callchain_help, &parse_callchain_opt),
1509 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1510 		    "Accumulate callchains of children and show total overhead as well"),
1511 	OPT_INTEGER(0, "max-stack", &top.max_stack,
1512 		    "Set the maximum stack depth when parsing the callchain. "
1513 		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1514 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1515 		   "ignore callees of these functions in call graphs",
1516 		   report_parse_ignore_callees_opt),
1517 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1518 		    "Show a column with the sum of periods"),
1519 	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1520 		   "only consider symbols in these dsos"),
1521 	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1522 		   "only consider symbols in these comms"),
1523 	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1524 		   "only consider these symbols"),
1525 	OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
1526 		    "Interleave source code with assembly code (default)"),
1527 	OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
1528 		    "Display raw encoding of assembly instructions (default)"),
1529 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1530 		    "Enable kernel symbol demangling"),
1531 	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
1532 	OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
1533 		    "objdump binary to use for disassembly and annotations"),
1534 	OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
1535 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
1536 	OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
1537 		    "Add prefix to source file path names in programs (with --prefix-strip)"),
1538 	OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
1539 		    "Strip first N entries of source file path name in programs (with --prefix)"),
1540 	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1541 	OPT_CALLBACK(0, "percent-limit", &top, "percent",
1542 		     "Don't show entries under that percent", parse_percent_limit),
1543 	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1544 		     "How to display percentage of filtered entries", parse_filter_percentage),
1545 	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1546 		   "width[,width...]",
1547 		   "don't try to adjust column width, use these fixed values"),
1548 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1549 			"per thread proc mmap processing timeout in ms"),
1550 	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
1551 		     "branch any", "sample any taken branches",
1552 		     parse_branch_stack),
1553 	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
1554 		     "branch filter mask", "branch stack filter modes",
1555 		     parse_branch_stack),
1556 	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1557 		    "Show raw trace event output (do not use print fmt or plugins)"),
1558 	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1559 		    "Show entries in a hierarchy"),
1560 	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
1561 		    "Use a backward ring buffer, default: no"),
1562 	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1563 	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1564 			"number of thread to run event synthesize"),
1565 	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
1566 		    "Record namespaces events"),
1567 	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
1568 		    "Record cgroup events"),
1569 	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
1570 		    "Sort the output by the event at the index n in group. "
1571 		    "If n is invalid, sort by the first event. "
1572 		    "WARNING: should be used on grouped events."),
1573 	OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
1574 		    "Enable LBR callgraph stitching approach"),
1575 #ifdef HAVE_LIBPFM
1576 	OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
1577 		"libpfm4 event selector. use 'perf list' to list available events",
1578 		parse_libpfm_events_option),
1579 #endif
1580 	OPTS_EVSWITCH(&top.evswitch),
1581 	OPT_END()
1582 	};
1583 	const char * const top_usage[] = {
1584 		"perf top [<options>]",
1585 		NULL
1586 	};
1587 	int status = hists__init();
1588 
1589 	if (status < 0)
1590 		return status;
1591 
1592 	top.annotation_opts.min_pcnt = 5;
1593 	top.annotation_opts.context  = 4;
1594 
1595 	top.evlist = evlist__new();
1596 	if (top.evlist == NULL)
1597 		return -ENOMEM;
1598 
1599 	status = perf_config(perf_top_config, &top);
1600 	if (status)
1601 		return status;
1602 	/*
1603 	 * The per-arch annotation init routine may need the cpuid, so read
1604 	 * it here, as we are not getting it from a perf.data header.
1605 	 */
1606 	status = perf_env__read_cpuid(&perf_env);
1607 	if (status) {
1608 		/*
1609 		 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
1610 		 * warn the user explicitly.
1611 		 */
1612 		eprintf(status == ENOSYS ? 1 : 0, verbose,
1613 			"Couldn't read the cpuid for this machine: %s\n",
1614 			str_error_r(errno, errbuf, sizeof(errbuf)));
1615 	}
1616 	top.evlist->env = &perf_env;
1617 
1618 	argc = parse_options(argc, argv, options, top_usage, 0);
1619 	if (argc)
1620 		usage_with_options(top_usage, options);
1621 
1622 	if (annotate_check_args(&top.annotation_opts) < 0)
1623 		goto out_delete_evlist;
1624 
1625 	if (!top.evlist->core.nr_entries &&
1626 	    evlist__add_default(top.evlist) < 0) {
1627 		pr_err("Not enough memory for event selector list\n");
1628 		goto out_delete_evlist;
1629 	}
1630 
1631 	status = evswitch__init(&top.evswitch, top.evlist, stderr);
1632 	if (status)
1633 		goto out_delete_evlist;
1634 
1635 	if (symbol_conf.report_hierarchy) {
1636 		/* disable incompatible options */
1637 		symbol_conf.event_group = false;
1638 		symbol_conf.cumulate_callchain = false;
1639 
1640 		if (field_order) {
1641 			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1642 			parse_options_usage(top_usage, options, "fields", 0);
1643 			parse_options_usage(NULL, options, "hierarchy", 0);
1644 			goto out_delete_evlist;
1645 		}
1646 	}
1647 
1648 	if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
1649 		pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
1650 		goto out_delete_evlist;
1651 	}
1652 
1653 	if (opts->branch_stack && callchain_param.enabled)
1654 		symbol_conf.show_branchflag_count = true;
1655 
1656 	sort__mode = SORT_MODE__TOP;
1657 	/* display thread wants entries to be collapsed in a different tree */
1658 	perf_hpp_list.need_collapse = 1;
1659 
1660 	if (top.use_stdio)
1661 		use_browser = 0;
1662 	else if (top.use_tui)
1663 		use_browser = 1;
1664 
1665 	setup_browser(false);
1666 
1667 	if (setup_sorting(top.evlist) < 0) {
1668 		if (sort_order)
1669 			parse_options_usage(top_usage, options, "s", 1);
1670 		if (field_order)
1671 			parse_options_usage(sort_order ? NULL : top_usage,
1672 					    options, "fields", 0);
1673 		goto out_delete_evlist;
1674 	}
1675 
1676 	status = target__validate(target);
1677 	if (status) {
1678 		target__strerror(target, status, errbuf, BUFSIZ);
1679 		ui__warning("%s\n", errbuf);
1680 	}
1681 
1682 	status = target__parse_uid(target);
1683 	if (status) {
1684 		int saved_errno = errno;
1685 
1686 		target__strerror(target, status, errbuf, BUFSIZ);
1687 		ui__error("%s\n", errbuf);
1688 
1689 		status = -saved_errno;
1690 		goto out_delete_evlist;
1691 	}
1692 
1693 	if (target__none(target))
1694 		target->system_wide = true;
1695 
1696 	if (evlist__create_maps(top.evlist, target) < 0) {
1697 		ui__error("Couldn't create thread/CPU maps: %s\n",
1698 			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
1699 		goto out_delete_evlist;
1700 	}
1701 
1702 	if (top.delay_secs < 1)
1703 		top.delay_secs = 1;
1704 
1705 	if (record_opts__config(opts)) {
1706 		status = -EINVAL;
1707 		goto out_delete_evlist;
1708 	}
1709 
1710 	top.sym_evsel = evlist__first(top.evlist);
1711 
1712 	if (!callchain_param.enabled) {
1713 		symbol_conf.cumulate_callchain = false;
1714 		perf_hpp__cancel_cumulate();
1715 	}
1716 
1717 	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1718 		callchain_param.order = ORDER_CALLER;
1719 
1720 	status = symbol__annotation_init();
1721 	if (status < 0)
1722 		goto out_delete_evlist;
1723 
1724 	annotation_config__init(&top.annotation_opts);
1725 
1726 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1727 	status = symbol__init(NULL);
1728 	if (status < 0)
1729 		goto out_delete_evlist;
1730 
1731 	sort__setup_elide(stdout);
1732 
1733 	get_term_dimensions(&top.winsize);
1734 	if (top.print_entries == 0) {
1735 		perf_top__update_print_entries(&top);
1736 		signal(SIGWINCH, winch_sig);
1737 	}
1738 
1739 	top.session = perf_session__new(NULL, false, NULL);
1740 	if (IS_ERR(top.session)) {
1741 		status = PTR_ERR(top.session);
1742 		goto out_delete_evlist;
1743 	}
1744 
1745 #ifdef HAVE_LIBBPF_SUPPORT
1746 	if (!top.record_opts.no_bpf_event) {
1747 		top.sb_evlist = evlist__new();
1748 
1749 		if (top.sb_evlist == NULL) {
1750 			pr_err("Couldn't create side band evlist.\n");
1751 			goto out_delete_evlist;
1752 		}
1753 
1754 		if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
1755 			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
1756 			goto out_delete_evlist;
1757 		}
1758 	}
1759 #endif
1760 
1761 	if (evlist__start_sb_thread(top.sb_evlist, target)) {
1762 		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1763 		opts->no_bpf_event = true;
1764 	}
1765 
1766 	status = __cmd_top(&top);
1767 
1768 	if (!opts->no_bpf_event)
1769 		evlist__stop_sb_thread(top.sb_evlist);
1770 
1771 out_delete_evlist:
1772 	evlist__delete(top.evlist);
1773 	perf_session__delete(top.session);
1774 
1775 	return status;
1776 }
1777