xref: /openbmc/linux/tools/perf/util/bpf_off_cpu.c (revision 07fc958b)
// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
/*
 * We don't need a real timestamp, we just want the off-cpu samples to
 * sort after every regular sample; the low bits are bumped per sample
 * (see off_cpu_write) to keep their relative order.
 */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)

static struct off_cpu_bpf *skel;

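/*
 * Aggregation key for the 'off_cpu' BPF map.  The layout is expected to
 * match the key used by the BPF program in bpf_skel/off_cpu.bpf.c:
 * stack_id indexes the 'stacks' map, state is the task state recorded at
 * sched-out, and cgroup_id is only meaningful when cgroups are recorded.
 */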
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

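/*
 * Scratch buffer for synthesizing PERF_RECORD_SAMPLE records in
 * off_cpu_write(): the perf_event_header occupies array[0] and the
 * sample payload is built in the following u64 slots.
 */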
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

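/*
 * Add a software event (PERF_COUNT_SW_BPF_OUTPUT) named OFFCPU_EVENT to
 * the evlist; the samples synthesized in off_cpu_write() are attributed
 * to this event.  Callchains are requested since off-cpu analysis is
 * stack-based.
 */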
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

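/*
 * "record_start" perf hook.  When profiling a forked workload (no CPU or
 * task filter was set up in off_cpu_prepare() and the thread map carries
 * the workload pid), install that pid in the task filter; uses_tgid lets
 * the BPF program match the entry against tgid so the whole process is
 * covered.  Finally enable collection.
 */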
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

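/*
 * "record_end" perf hook: stop collection and tear down the skeleton.
 * off_cpu_write() dereferences 'skel', so it must run before this hook.
 */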
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added a prev_state arg to sched_switch, so check the signature */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

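/*
 * Set up the off-cpu BPF skeleton for a record session: add the
 * OFFCPU_EVENT evsel, open the skeleton, size the cpu/task/cgroup filter
 * maps, load and attach the BPF program, populate the filters and
 * register the record_start/record_end perf hooks.
 *
 * Illustrative call sequence from a perf-record style caller (the
 * caller-side names below are hypothetical, not taken from this file):
 *
 *	if (off_cpu_prepare(evlist, &opts->target, opts) < 0)
 *		return -1;
 *	...record session runs; the perf hooks toggle collection...
 *	bytes += off_cpu_write(session);   (must run while skel is loaded)
 */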
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

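	/*
	 * Map sizes and rodata must be final before load.  set_max_rlimit()
	 * bumps RLIMIT_MEMLOCK so map creation does not fail on kernels
	 * without memcg-based accounting of BPF memory.
	 */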
	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

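	/*
	 * Resolve each requested cgroup to its kernel cgroup id and add it
	 * to the cgroup filter map; events without a cgroup (e.g. the dummy
	 * event) are skipped.
	 */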
	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

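/*
 * Drain the 'off_cpu' BPF map and append one synthesized PERF_RECORD_SAMPLE
 * per entry to the perf data file, attributed to the OFFCPU_EVENT evsel.
 * The accumulated off-cpu time is written as the sample period, and
 * OFF_CPU_TIMESTAMP (bumped per sample) makes the records sort after all
 * regular samples.  Returns the number of bytes written.
 */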
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

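	/*
	 * Walk every entry in the map.  'val' is the total off-cpu time (in
	 * nanoseconds, as accumulated by the BPF program) for this
	 * (task, stack, state, cgroup) combination; each entry becomes one
	 * sample whose fields are emitted in the order dictated by sample_type.
	 */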
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* bump the dummy timestamp so later samples keep their order */
		tstamp++;
	}
	return bytes;
}