// SPDX-License-Identifier: GPL-2.0
/* tools/perf/util/bpf_off_cpu.c (xref revision 5e8bf00e) */
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
/* we don't need an actual timestamp, just want the samples to be sorted last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)

static struct off_cpu_bpf *skel;

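/*
 * Aggregation key for the off_cpu BPF map: the skeleton is expected to keep
 * one accumulated off-cpu time value per thread (pid/tgid), stack trace id,
 * task state and cgroup id, matching the key layout in bpf_skel/off_cpu.bpf.c.
 */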
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

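/*
 * Scratch buffer used by off_cpu_write() to build one synthetic
 * PERF_RECORD_SAMPLE: the header overlays array[0] and the sample payload
 * is filled in from array[1] onwards (1024 bytes total).
 */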
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

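/*
 * Add a software PERF_COUNT_SW_BPF_OUTPUT event named OFFCPU_EVENT to the
 * evlist.  The samples synthesized in off_cpu_write() are attributed to this
 * evsel, and it requests callchains so the stored stack traces can be
 * resolved by report/script.
 */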
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

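/*
 * "record_start" perf hook, called when perf record actually starts the
 * session.  If no explicit CPU or task filter was set up but the thread map
 * has a real pid (typically a workload forked by perf record), add that pid
 * to the task filter, then enable collection in the BPF program.
 */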
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

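/*
 * "record_end" perf hook: stop collection and tear down the BPF skeleton.
 * off_cpu_write() presumably runs before this hook, since it still reads
 * the skeleton's maps.
 */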
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* v5.18 kernels added a prev_state arg to sched_switch, so we need to check the signature */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

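/*
 * Set up everything for off-cpu profiling before the record session starts.
 * Expected to be called from perf record when --off-cpu is given: add the
 * extra evsel, open the skeleton, size the filter maps from the target, load
 * the BPF object, fill the filters, attach the programs and register the
 * record hooks.
 */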
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

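	/*
	 * set_max_rlimit() raises RLIMIT_MEMLOCK, which BPF map creation may
	 * still need on some kernels.  The sched_switch signature check must
	 * run before load because it sets skeleton rodata.
	 */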
	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

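	/*
	 * The filter maps are created at load time, so their fds are only
	 * valid now; populate them with the requested CPUs, tasks and
	 * cgroups.
	 */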
	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

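	/*
	 * Collection itself is not enabled here; the record hooks toggle
	 * skel->bss->enabled around the actual recording via off_cpu_start()
	 * and off_cpu_finish().
	 */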
	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

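/*
 * Drain the BPF maps into the perf.data file at the end of the session:
 * each off_cpu map entry becomes one synthetic PERF_RECORD_SAMPLE for the
 * OFFCPU_EVENT evsel, with the accumulated value (the off-cpu time collected
 * by the BPF program) stored as the sample period.  A rough sketch of the
 * layout built in data.array[] (which fields appear depends on the evsel's
 * sample_type):
 *
 *   array[0]   perf_event_header (PERF_RECORD_SAMPLE)
 *   array[1]   sample id                        PERF_SAMPLE_IDENTIFIER
 *   ...        sample ip (first stack entry)    PERF_SAMPLE_IP
 *   ...        pid/tid                          PERF_SAMPLE_TID
 *   ...        dummy timestamp                  PERF_SAMPLE_TIME
 *   ...        off-cpu time (map value)         PERF_SAMPLE_PERIOD
 *   ...        nr, PERF_CONTEXT_USER, ips       PERF_SAMPLE_CALLCHAIN
 *   ...        cgroup id                        PERF_SAMPLE_CGROUP
 */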
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("not supported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

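	/*
	 * Walk every key in the off_cpu map, starting from a zeroed key
	 * (bpf_map_get_next_key() returns the first key when the given one
	 * does not exist), and emit one sample per entry.
	 */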
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}