xref: /openbmc/linux/tools/perf/util/bpf_counter.c (revision b3d9fc14)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

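/*
 * bpf_counter: count perf events for BPF programs.
 *
 * This backs 'perf stat -b <prog_id[,prog_id...]>' (--bpf-prog): for each
 * target program, a bpf_prog_profiler skeleton attaches fentry/fexit
 * programs that read the perf events on entry and exit and accumulate the
 * deltas in per-CPU maps, which bpf_program_profiler__read() folds back
 * into the evsel's counts.
 *
 * Example (counting cycles for BPF prog 254; the id is hypothetical):
 *
 *	perf stat -b 254 -e cycles -I 1000
 */
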
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "target.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"

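/* BPF uAPI structs carry pointers as __u64; convert back to a host pointer. */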
static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

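/*
 * Kernels before v5.11 charge BPF map memory against RLIMIT_MEMLOCK, so
 * raise the limit as far as possible before creating maps.
 */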
static void set_max_rlimit(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);
}

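/* Allocate a zeroed counter with its list node ready for linking. */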
static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

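/* Destroy and free every per-target-program skeleton hanging off this evsel. */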
static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

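/*
 * Look up the name of the target program's entry function through its BTF
 * func_info. fentry/fexit programs attach by function name, so the target
 * must have been compiled with BTF. The caller frees the returned string.
 */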
static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0 ||
	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}

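/*
 * Open, configure and load one profiler skeleton for the program with the
 * given id: size the maps for this evsel's CPUs, point each fentry/fexit
 * program at the target's entry function, then link the new counter into
 * the evsel's bpf_counter_list.
 */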
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name = NULL;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__resize(skel->maps.fentry_readings, 1);
	bpf_map__resize(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	free(prog_name);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	free(prog_name);
	return -1;
}

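/*
 * target->bpf_str is a comma-separated list of program ids, e.g. "123,456"
 * from 'perf stat -b 123,456' (ids here are hypothetical). Load one
 * profiler skeleton per id; any parse or load failure tears down whatever
 * was loaded so far.
 */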
static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

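/* Attach the fentry/fexit programs of every skeleton on this evsel. */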
static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

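/*
 * Sum the per-CPU accumulated readings of every skeleton on this evsel
 * into the evsel's counts. The readings are sized by the "possible" CPU
 * count, while perf's counts are indexed by "online" CPUs; see the
 * comments below.
 */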
static int bpf_program_profiler__read(struct evsel *evsel)
{
	// perf_cpu_map uses /sys/devices/system/cpu/online
	int num_cpu = evsel__nr_cpus(evsel);
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, e.g. a Ryzen 3900X has 24 threads
	// but its "possible" mask shows 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	int reading_map_fd;
	__u32 key = 0;
	int err, cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		perf_counts(evsel->counts, cpu, 0)->val = 0;
		perf_counts(evsel->counts, cpu, 0)->ena = 0;
		perf_counts(evsel->counts, cpu, 0)->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (cpu = 0; cpu < num_cpu; cpu++) {
			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
		}
	}
	return 0;
}

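/*
 * Store the perf event FD for this CPU in each skeleton's events map, so
 * that the BPF side can read the counter (via bpf_perf_event_read_value()).
 */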
static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

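/* Method table consumed by the generic bpf_counter__*() entry points below. */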
struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable	    = bpf_program_profiler__enable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

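/*
 * Generic entry points. They are no-ops (or report "no data yet" with
 * -EAGAIN) when the evsel has no BPF counters attached.
 */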
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
	if (list_empty(&evsel->bpf_counter_list))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target__has_bpf(target))
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (list_empty(&evsel->bpf_counter_list))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (list_empty(&evsel->bpf_counter_list))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}