xref: /openbmc/linux/tools/perf/util/bpf_ftrace.c (revision 0df6ade7)
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"

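/* Global func_latency skeleton, shared by the prepare/start/stop/read/cleanup callbacks below. */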
static struct func_latency_bpf *skel;

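/*
 * Open and load the func_latency skeleton, size and populate its CPU and
 * task filter maps from the target, and attach entry/exit kprobes to the
 * single requested function.
 */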
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

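	/* size the task filter map to fit the number of target threads */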
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

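	/* raise RLIMIT_MEMLOCK so that BPF map creation does not fail */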
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}

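	/* mark each requested CPU in the cpu filter map */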
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

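	/* mark each target task in the task filter map */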
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	skel->bss->use_nsec = ftrace->use_nsec;

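	/*
	 * Attach func_begin as a kprobe at function entry, and func_end as a
	 * kretprobe (retprobe == true) at function return.
	 */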
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}

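/* Set the global 'enabled' flag so the attached BPF programs start recording. */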
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

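/* Clear 'enabled'; the collected histogram stays in the map for reading. */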
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

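/* Sum the NUM_BUCKET histogram entries across all CPUs into buckets[]. */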
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

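	/* the latency map is per-CPU: one lookup fills a u64 count per CPU */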
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}

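/* Detach the kprobes and free the skeleton. */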
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}
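
/*
 * Minimal usage sketch, for illustration only (the actual caller lives in
 * builtin-ftrace.c and also handles the non-BPF ftrace path):
 *
 *	int buckets[NUM_BUCKET] = { 0 };
 *	int fd = perf_ftrace__latency_prepare_bpf(ftrace);
 *
 *	if (fd < 0)
 *		return fd;
 *	perf_ftrace__latency_start_bpf(ftrace);
 *	(run the workload or wait for the user to interrupt)
 *	perf_ftrace__latency_stop_bpf(ftrace);
 *	perf_ftrace__latency_read_bpf(ftrace, buckets);
 *	perf_ftrace__latency_cleanup_bpf(ftrace);
 */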