// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
#define MAX_PROC  4096
/* we don't need an actual timestamp, just want the samples to sort last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)
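/*
 * Only the upper 32 bits are set, leaving the lower 32 bits free so
 * off_cpu_write() can bump the timestamp once per sample and keep the
 * synthesized records in a stable order.
 */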

static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

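/*
 * Scratch buffer used by off_cpu_write() to synthesize one
 * PERF_RECORD_SAMPLE at a time: a perf_event_header followed by the
 * sample payload, up to 1024 bytes in total.
 */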
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

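/*
 * Add a software event (PERF_COUNT_SW_BPF_OUTPUT) named OFFCPU_EVENT to
 * the evlist.  It carries no counts of its own; it only reserves the
 * attr slot (with PERF_SAMPLE_CALLCHAIN set) under which off_cpu_write()
 * later emits the synthesized off-cpu samples.
 */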
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

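/*
 * "record_start" hook: if no cpu/task filter was set up in advance and
 * perf record launched a workload, add the workload's pid to the task
 * filter, then let the BPF program start collecting.
 */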
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

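/* "record_end" hook: stop the BPF program and tear down the skeleton */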
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added the prev_state arg, so we need to check the signature */
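/*
 * The BTF walk below follows:
 *   btf_trace_sched_switch (typedef) -> pointer -> function prototype.
 * The prototype carries the tracepoint arguments plus one extra context
 * argument, so a vlen of 5 means the 4-argument variant that includes
 * prev_state.
 */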
static void check_sched_switch_args(void)
{
	const struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
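	/* a failed lookup returns a negative error encoded in the u32 */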
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	/* btf_trace func proto has one more argument for the context */
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

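/*
 * Set up everything needed before recording starts: add the dummy
 * OFFCPU_EVENT to the evlist, open the BPF skeleton, size the filter
 * maps to match the target (this must happen before load), populate
 * them afterwards, attach the BPF programs, and register the record
 * start/end hooks.
 */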
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to configure off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

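		/* allow at least MAX_PROC entries in case more tasks show up */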
		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

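	/* raise RLIMIT_MEMLOCK so that BPF map creation cannot fail on it */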
	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

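/*
 * Called once at the end of the record session: drain the BPF off_cpu
 * map and append one synthesized PERF_RECORD_SAMPLE per entry to the
 * perf.data file.  Returns the number of bytes written.
 */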
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

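	/*
	 * Iterate over every accumulated key and build the sample payload
	 * field by field, in the order mandated by sample_type.
	 */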
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}