xref: /openbmc/linux/tools/perf/builtin-ftrace.c (revision 4bb1eb3c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
 * Copyright (c) 2020  Changbin Du <changbin.du@gmail.com>, significant enhancement.
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"

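/*
 * 'perf ftrace' drives the in-kernel function tracer: every knob below is
 * configured by writing to files under the tracefs mount (usually
 * /sys/kernel/tracing), which get_tracing_file() resolves for us.
 */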
#define DEFAULT_TRACER  "function_graph"

struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	bool			list_avail_functions;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

/* written from the signal handlers below, hence volatile sig_atomic_t */
static volatile sig_atomic_t workload_exec_errno;
static volatile sig_atomic_t done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload() sends a SIGUSR1 if the fork fails, because
 * we asked for it by setting its exec_error callback to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}

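/*
 * Write @val to the tracefs file @name; roughly the programmatic equivalent
 * of a shell command like (path shown for illustration):
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 */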
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}

static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;
		else if (n < 0)
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}

static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
	write_tracing_option_file("funcgraph-proc", "0");
	write_tracing_option_file("funcgraph-abstime", "0");
	write_tracing_option_file("latency-format", "0");
	write_tracing_option_file("irq-info", "0");
}

static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	if (write_tracing_file("tracing_thresh", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}

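/*
 * Limit tracing to the target pids by appending each thread id to
 * set_ftrace_pid; appending (rather than truncating) keeps the pids written
 * by earlier iterations of the loop in the kernel's pid list.
 */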
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  ftrace->evlist->core.threads->map[i]);
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}

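/*
 * tracing_cpumask takes a comma-separated hex mask: one hex digit covers
 * four cpus and a ',' separator is inserted every 32 cpus. For example,
 * with last_cpu == 63 the buffer must hold "ffffffff,ffffffff" plus a NUL:
 * 63/4 + 2 = 17 bytes for the digits and EOS, plus 63/32 = 1 byte for the
 * separator, 18 in total.
 */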
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed for every 32 cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}

static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_irq_info)
		return 0;

	if (write_tracing_option_file("irq-info", "1") < 0)
		return -1;

	return 0;
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}

static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

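/*
 * set_ftrace_filter/set_ftrace_notrace steer the function tracer, while
 * set_graph_function/set_graph_notrace steer the function_graph tracer.
 */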
static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}

static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_verbose)
		return 0;

	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
		return -1;

	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
		return -1;

	if (write_tracing_option_file("latency-format", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->graph_thresh == 0)
		return 0;

	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_options(struct perf_ftrace *ftrace)
{
	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		return -1;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		return -1;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		return -1;
	}

	if (set_tracing_func_irqinfo(ftrace) < 0) {
		pr_err("failed to set tracing option irq-info\n");
		return -1;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		return -1;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		return -1;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		return -1;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		return -1;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		return -1;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		return -1;
	}

	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
		return -1;
	}

	if (set_tracing_thresh(ftrace) < 0) {
		pr_err("failed to set tracing thresh\n");
		return -1;
	}

	return 0;
}

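/*
 * Main run loop: reset the tracing files, fork the workload (if any), apply
 * all options, select the tracer, then stream trace_pipe to stdout until the
 * workload exits or the user interrupts us.
 */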
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (ftrace->list_avail_functions)
		return read_tracing_file_to_stdout("available_filter_functions");

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	perf_evlist__start_workload(ftrace->evlist);

	if (ftrace->initial_delay) {
		usleep(ftrace->initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so the error message below appears at the end */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}

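/*
 * Honor the ftrace.tracer perf config key; e.g. an illustrative
 * ~/.perfconfig snippet:
 *
 *	[ftrace]
 *		tracer = function
 */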
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}

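/*
 * Parse a human-readable size for -m/--buffer-size; e.g. "2M" yields
 * 2097152, and anything below 1024 bytes is rejected.
 */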
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB\n");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}

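/*
 * Sub-options for the function tracer, given as e.g.
 * --func-opts call-graph,irq-info (names come from the table below).
 */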
static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

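/*
 * Sub-options for the function_graph tracer, given as e.g.
 * --graph-opts noirqs,thresh=100,depth=5.
 */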
static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph tracer has priority over the function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}

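/*
 * Example invocations (illustrative):
 *
 *	perf ftrace -- sleep 1                # function_graph on the workload
 *	perf ftrace -T 'schedule*' -p 1234    # function tracer on an existing pid
 */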
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_BOOLEAN('F', "funcs", &ftrace.list_avail_functions,
		    "Show available functions to filter"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "Be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		    "List of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
		     "Number of milliseconds to wait before starting tracing after program start"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}