/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"
#include "util/ordered-events.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

#include "sane_ctype.h"

static volatile int done;
static volatile int resize;

#define HEADER_LINE_NR  5

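/*
 * The stdio output reserves HEADER_LINE_NR lines for the summary header;
 * everything below it on the terminal is available for histogram entries.
 */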
static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}

static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}

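/*
 * Prepare the annotation machinery for 'he': reject symbols we can only
 * resolve via /proc/kallsyms, allocate the per-event source histograms,
 * disassemble the symbol and make it the current annotation filter entry.
 */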
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct perf_evsel *evsel;
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	evsel = hists_to_evsel(he->hists);
	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	pthread_mutex_lock(&notes->lock);

	if (!symbol__hists(sym, top->evlist->nr_entries)) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
	if (err == 0) {
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
}

static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr:   %" PRIx64 "\n"
		    "DSO:    %s %c\n"
		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch:   %s\n"
		    "Kernel: %s\n"
		    "Tools:  %s\n\n"
		    "Not all samples will be shown in the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

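/*
 * Account a sample at 'ip' to the annotation histogram of the hist entry's
 * symbol. Uses trylock so the sampling path never blocks on a symbol that
 * is currently being rendered by the display thread.
 */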
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					struct perf_evsel *evsel, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
	}
}

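/*
 * stdio mode: print the annotated source/asm lines for the symbol the user
 * selected with the 's' key, decaying or zeroing its histograms afterwards.
 */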
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct perf_evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);
	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	symbol__calc_percent(symbol, evsel);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

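/*
 * One stdio refresh: clear the screen, print the header and either the
 * annotation of the filtered symbol or the resorted histogram table.
 */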
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct perf_evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (!top->record_opts.overwrite &&
	    (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST])) {
		hists->stats.nr_lost_warned =
			      hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	if (top->evlist->enabled) {
		if (top->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, top->hide_user_symbols,
					     top->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, !symbol_conf.use_callchain);
}

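/* Read an integer from stdin, leaving '*target' untouched on bad input. */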
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

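/*
 * Ask for a symbol name and, if it is in the current hists, start
 * annotating it; the previously annotated symbol has its counters zeroed.
 */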
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
			return top->evlist->nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

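/*
 * Handle one interactive key. Unmapped keys bring up the help above and a
 * second chance to choose. Returns false when the user asked to quit.
 */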
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
		case 'd':
			prompt_integer(&top->delay_secs, "Enter display delay");
			if (top->delay_secs < 1)
				top->delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&top->print_entries, "Enter display entries (lines)");
			if (top->print_entries == 0) {
				perf_top__resize(top);
				signal(SIGWINCH, winch_sig);
			} else {
				signal(SIGWINCH, SIG_DFL);
			}
			break;
		case 'E':
			if (top->evlist->nr_entries > 1) {
				/* Select 0 as the default event: */
				int counter = 0;

				fprintf(stderr, "\nAvailable events:");

				evlist__for_each_entry(top->evlist, top->sym_evsel)
					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

				prompt_integer(&counter, "Enter details event counter");

				if (counter >= top->evlist->nr_entries) {
					top->sym_evsel = perf_evlist__first(top->evlist);
					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
					sleep(1);
					break;
				}
				evlist__for_each_entry(top->evlist, top->sym_evsel)
					if (top->sym_evsel->idx == counter)
						break;
			} else
				top->sym_evsel = perf_evlist__first(top->evlist);
			break;
		case 'f':
			prompt_integer(&top->count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&top->annotation_opts.min_pcnt,
				       "Enter details display event filter (percent)");
			break;
		case 'K':
			top->hide_kernel_symbols = !top->hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (top->dump_symtab)
				perf_session__fprintf_dsos(top->session, stderr);
			ret = false;
			break;
		case 's':
			perf_top__prompt_symbol(top, "Enter details symbol");
			break;
		case 'S':
			if (!top->sym_filter_entry)
				break;
			else {
				struct hist_entry *syme = top->sym_filter_entry;

				top->sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top->hide_user_symbols = !top->hide_user_symbols;
			break;
		case 'z':
			top->zero = !top->zero;
			break;
		default:
			break;
	}

	return ret;
}

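/*
 * Timer callback for both UIs: decay (or zero) the old entries and resort
 * the histograms to fold in the newly processed samples.
 */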
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	struct perf_evsel *evsel = t->sym_evsel;
	struct hists *hists;

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	hists = evsel__hists(evsel);

	if (t->evlist->enabled) {
		if (t->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, t->hide_user_symbols,
					     t->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
}

static void stop_top(void)
{
	session_done = 1;
	done = 1;
}

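/* TUI display loop: delegate refreshing to the hists browser timer. */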
static void *display_thread_tui(void *arg)
{
	struct perf_evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * zooming in/out on UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env,
				      !top->record_opts.overwrite,
				      &top->annotation_opts);

	stop_top();
	return NULL;
}

static void display_sig(int sig __maybe_unused)
{
	stop_top();
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT,  display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

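/*
 * stdio display loop: repaint every 'delay_secs' or when poll() says there
 * is a keypress to handle.
 */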
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash the pending return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			stop_top();
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

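/*
 * Called for each hist entry the iterator adds: feed precise IP samples to
 * the annotation histograms and account branch cycles.
 */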
static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
	return 0;
}

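/*
 * Resolve a sample to thread/map/symbol, warn once about unresolvable
 * kernel samples (kptr_restrict, missing vmlinux) and add it to the hists.
 */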
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
			  al.map && map__has_symbols(al.map) ?
			  " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];
				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample		= sample,
			.add_entry_cb	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	addr_location__put(&al);
}

static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
		       struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost.lost;
	top->lost_total += event->lost.lost;
	hists->stats.total_lost += event->lost.lost;
}

static void
perf_top__process_lost_samples(struct perf_top *top,
			       union perf_event *event,
			       struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost_samples.lost;
	top->lost_total += event->lost_samples.lost;
	hists->stats.total_lost_samples += event->lost_samples.lost;
}

static u64 last_timestamp;

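/*
 * Drain one mmap ring: timestamp each event and queue it into the ordered
 * events buffer that the processing thread consumes.
 */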
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct record_opts *opts = &top->record_opts;
	struct perf_evlist *evlist = top->evlist;
	struct perf_mmap *md;
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(md) < 0)
		return;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		int ret;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		if (ret && ret != -1)
			break;

		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
		if (ret)
			break;

		perf_mmap__consume(md);

		if (top->qe.rotate) {
			pthread_mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			pthread_cond_signal(&top->qe.cond);
			pthread_mutex_unlock(&top->qe.mutex);
		}
	}

	perf_mmap__read_done(md);
}

static void perf_top__mmap_read(struct perf_top *top)
{
	bool overwrite = top->record_opts.overwrite;
	struct perf_evlist *evlist = top->evlist;
	int i;

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);

	if (overwrite) {
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
	}
}

/*
 * Check the per-event overwrite term.
 * perf top should use a consistent term for all events.
 * - No event has a per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing changes, return 0.
 * - All events have the same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
 *   Use the per-event setting to replace opts->overwrite if
 *   they differ, then return 0.
 * - Events have different per-event terms
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 *   Return -1
 * - Some events set a per-event term, but others do not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
 *   Return -1
 */
static int perf_top__overwrite_check(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	struct perf_evlist *evlist = top->evlist;
	struct perf_evsel_config_term *term;
	struct list_head *config_terms;
	struct perf_evsel *evsel;
	int set, overwrite = -1;

	evlist__for_each_entry(evlist, evsel) {
		set = -1;
		config_terms = &evsel->config_terms;
		list_for_each_entry(term, config_terms, list) {
			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		}

		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
			continue;

		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
			return -1;

		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
			return -1;

		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == perf_evlist__first(evlist))
				overwrite = set;
			else
				return -1;
		}
	}

	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;

	return 0;
}

static int perf_top_overwrite_fallback(struct perf_top *top,
				       struct perf_evsel *evsel)
{
	struct record_opts *opts = &top->record_opts;
	struct perf_evlist *evlist = top->evlist;
	struct perf_evsel *counter;

	if (!opts->overwrite)
		return 0;

	/* only fall back when first event fails */
	if (evsel != perf_evlist__first(evlist))
		return 0;

	evlist__for_each_entry(evlist, counter)
		counter->attr.write_backward = false;
	opts->overwrite = false;
	pr_debug2("fall back to non-overwrite mode\n");
	return 1;
}

static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct perf_evsel *counter;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	if (perf_top__overwrite_check(top)) {
		ui__error("perf top only supports a consistent per-event "
			  "overwrite setting for all events\n");
		goto out_err;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads) < 0) {

			/*
			 * Specially handle the overwrite fallback here,
			 * because perf top is the only tool that has
			 * overwrite mode on by default, supports both
			 * overwrite and non-overwrite modes, and requires
			 * a consistent mode for all events.
			 *
			 * This may move to generic code once more tools
			 * grow a similar attribute.
			 */
			if (perf_missing_features.write_backward &&
			    perf_top_overwrite_fallback(top, counter))
				goto try_again;

			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			    errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

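/*
 * Sampled events land in qe.in while the processing thread flushes the
 * other queue; rotate_queues() swaps them so events are delivered in
 * timestamp order without stalling the reader.
 */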
static struct ordered_events *rotate_queues(struct perf_top *top)
{
	struct ordered_events *in = top->qe.in;

	if (top->qe.in == &top->qe.data[1])
		top->qe.in = &top->qe.data[0];
	else
		top->qe.in = &top->qe.data[1];

	return in;
}

static void *process_thread(void *arg)
{
	struct perf_top *top = arg;

	while (!done) {
		struct ordered_events *out, *in = top->qe.in;

		if (!in->nr_events) {
			usleep(100);
			continue;
		}

		out = rotate_queues(top);

		pthread_mutex_lock(&top->qe.mutex);
		top->qe.rotate = true;
		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
		pthread_mutex_unlock(&top->qe.mutex);

		if (ordered_events__flush(out, OE_FLUSH__TOP))
			pr_err("failed to process events\n");
	}

	return NULL;
}

/*
 * Allow only 'top->delay_secs' seconds behind samples.
 */
static bool should_drop(struct ordered_event *qevent, struct perf_top *top)
{
	union perf_event *event = qevent->event;
	u64 delay_timestamp;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return false;

	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
	return delay_timestamp < last_timestamp;
}

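/*
 * Deliver one queued event: parse the sample, account it per cpumode and
 * route it to the sample/lost/machine handlers. Samples that are more than
 * 'delay_secs' behind are dropped, see should_drop().
 */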
static int deliver_event(struct ordered_events *qe,
			 struct ordered_event *qevent)
{
	struct perf_top *top = qe->data;
	struct perf_evlist *evlist = top->evlist;
	struct perf_session *session = top->session;
	union perf_event *event = qevent->event;
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct machine *machine;
	int ret = -1;

	if (should_drop(qevent, top)) {
		top->drop++;
		top->drop_total++;
		return 0;
	}

	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		goto next_event;
	}

	evsel = perf_evlist__id2evsel(session->evlist, sample.id);
	assert(evsel != NULL);

	if (event->header.type == PERF_RECORD_SAMPLE)
		++top->samples;

	switch (sample.cpumode) {
	case PERF_RECORD_MISC_USER:
		++top->us_samples;
		if (top->hide_user_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top->kernel_samples;
		if (top->hide_kernel_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top->guest_kernel_samples;
		machine = perf_session__find_machine(session,
						     sample.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top->guest_us_samples;
		/*
		 * TODO: we don't process guest user samples from the
		 * host side, except for simple counting.
		 */
		goto next_event;
	default:
		if (event->header.type == PERF_RECORD_SAMPLE)
			goto next_event;
		machine = &session->machines.host;
		break;
	}

	if (event->header.type == PERF_RECORD_SAMPLE) {
		perf_event__process_sample(&top->tool, event, evsel,
					   &sample, machine);
	} else if (event->header.type == PERF_RECORD_LOST) {
		perf_top__process_lost(top, event, evsel);
	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
		perf_top__process_lost_samples(top, event, evsel);
	} else if (event->header.type < PERF_RECORD_MAX) {
		hists__inc_nr_events(evsel__hists(evsel), event->header.type);
		machine__process_event(machine, event, &sample);
	} else
		++session->evlist->stats.nr_unknown_events;

	ret = 0;
next_event:
	return ret;
}

static void init_process_thread(struct perf_top *top)
{
	ordered_events__init(&top->qe.data[0], deliver_event, top);
	ordered_events__init(&top->qe.data[1], deliver_event, top);
	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
	top->qe.in = &top->qe.data[0];
	pthread_mutex_init(&top->qe.mutex, NULL);
	pthread_cond_init(&top->qe.cond, NULL);
}

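/*
 * Main driver: create the session, synthesize the pre-existing threads,
 * start the counters and spawn the processing and display threads, then
 * keep draining the mmap rings until one of them tells us we are done.
 */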
static int __cmd_top(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evsel_config_term *err_term;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;
	pthread_t thread, thread_process;
	int ret;

	top->session = perf_session__new(NULL, false, NULL);
	if (top->session == NULL)
		return -1;

	if (!top->annotation_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env,
					       &top->annotation_opts.objdump_path);
		if (ret)
			goto out_delete;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		goto out_delete;

	if (perf_session__register_idle_thread(top->session) < 0)
		goto out_delete;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	init_process_thread(top);

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false,
				    top->nr_threads_synthesize);

	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0)
			goto out_err_cpu_topo;
	}

	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
	if (ret) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
			err_term->val.drv_cfg, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		goto out_delete;
	}

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(top->evlist);

	ret = -1;
	if (pthread_create(&thread_process, NULL, process_thread, top)) {
		ui__error("Could not create process thread.\n");
		goto out_delete;
	}

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							    display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_join_thread;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	/* Wait for a minimal set of events before starting the snapshot */
	perf_evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (opts->overwrite || (hits == top->samples))
			ret = perf_evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_join_thread:
	pthread_cond_signal(&top->qe.cond);
	pthread_join(thread_process, NULL);
out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return ret;

out_err_cpu_topo: {
	char errbuf[BUFSIZ];
	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

	ui__error("Could not read the CPU topology map: %s\n", err);
	goto out_delete;
}
}

static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}

static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";

int cmd_top(int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			.target		= {
				.uses_mmap   = true,
			},
			/*
			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
			 * when we pause, fix that and reenable. Probably using a
			 * separate evlist with a dummy event, i.e. a non-overwrite
			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
			 * stays in overwrite mode. -acme
			 */
			.overwrite	= 0,
			.sample_time	= true,
		},
		.max_stack	     = sysctl__max_stack(),
		.annotation_opts     = annotation__default_options,
		.nr_threads_synthesize = UINT_MAX,
	};
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		    "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN(0, "group", &opts->group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
		     "profile at this frequency",
		      record__parse_freq),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer to the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
		    "objdump binary to use for disassembly and annotations"),
	OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
		    "Use a backward ring buffer, default: no"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
			"number of threads to run event synthesis"),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	top.annotation_opts.min_pcnt = 5;
	top.annotation_opts.context  = 4;

	top.evlist = perf_evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	status = perf_config(perf_top_config, &top);
	if (status)
		return status;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		status = -ENOMEM;
		goto out_delete_evlist;
	}

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	if (opts->branch_stack && callchain_param.enabled)
		symbol_conf.show_branchflag_count = true;

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}

	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = perf_evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	annotation_config__init();

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init(NULL) < 0)
		return -1;

	sort__setup_elide(stdout);

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	}

	status = __cmd_top(&top);

out_delete_evlist:
	perf_evlist__delete(top.evlist);

	return status;
}