xref: /openbmc/linux/tools/perf/util/bpf_off_cpu.c (revision 7fc96d71)
// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
/* we don't need an actual timestamp, just a value that sorts the samples last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)

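/* handle for the off_cpu BPF skeleton, shared by the prepare/start/finish/write steps */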
static struct off_cpu_bpf *skel;

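/*
 * Key of the off_cpu BPF map: off-CPU time is aggregated per task
 * (pid/tgid), stack trace, task state and cgroup.
 */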
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

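/*
 * Staging buffer for a synthesized PERF_RECORD_SAMPLE: the header occupies
 * array[0] and the sample payload is filled in from array[1] onwards.
 */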
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

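/*
 * Add a software PERF_COUNT_SW_BPF_OUTPUT event named after OFFCPU_EVENT
 * to the evlist; the synthesized off-CPU samples are written against it.
 */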
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack traces */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

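/*
 * "record_start" perf hook: if no explicit CPU or task filter was set up,
 * add the forked workload's pid to the task filter map, then let the BPF
 * program start collecting off-CPU time.
 */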
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

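/* "record_end" perf hook: stop collecting and release the BPF skeleton */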
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/*
 * v5.18 kernels added a prev_state arg to the sched_switch tracepoint,
 * so check its signature via BTF.
 */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

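/*
 * Open and load the off_cpu BPF skeleton, size and fill the cpu/task/cgroup
 * filter maps according to the record target, attach the BPF programs and
 * register the record_start/record_end hooks.
 */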
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding the dummy event */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

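/*
 * Called at the end of the record session: drain the off_cpu and stacks
 * maps and write one synthesized PERF_RECORD_SAMPLE per entry to the
 * perf.data file.  Returns the number of bytes written.
 */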
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

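	/* stop the BPF program from accumulating more off-CPU time while the maps are drained */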
	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;
		/* TODO: handle more sample types */

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}