// SPDX-License-Identifier: GPL-2.0
/*
 * bpf_kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be in sync with "util/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128

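/*
 * Key identifying a work item in the BPF maps: kwork class type, CPU
 * and a per-class work id. Must match the key layout used by the BPF
 * programs in "util/kwork_trace.bpf.c".
 */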
struct work_key {
	u32 type;
	u32 cpu;
	u64 id;
};

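/*
 * Per-work statistics aggregated on the BPF side: number of events,
 * accumulated time, and the longest single interval together with its
 * start and end timestamps.
 */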
struct report_data {
	u64 nr;
	u64 total_time;
	u64 max_time;
	u64 max_time_start;
	u64 max_time_end;
};

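/*
 * Binds a generic kwork_class to its BPF-specific hooks:
 * load_prepare() enables the BPF programs needed for the requested
 * report before the skeleton is loaded, get_work_name() resolves a
 * human-readable name for a work key.
 */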
struct kwork_class_bpf {
	struct kwork_class *class;

	void (*load_prepare)(struct perf_kwork *kwork);
	int  (*get_work_name)(struct work_key *key, char **ret_name);
};

static struct kwork_trace_bpf *skel;

static struct timespec ts_start;
static struct timespec ts_end;

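/*
 * Start/stop tracing by toggling the BPF-side "enabled" switch and
 * record the monotonic start/end times of the profiling window.
 */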
void perf_kwork__trace_start(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	skel->bss->enabled = 1;
}

void perf_kwork__trace_finish(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	skel->bss->enabled = 0;
}

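/*
 * Look up the name the BPF programs recorded for @key and return a
 * duplicated copy in *ret_name; *ret_name stays NULL when no name was
 * recorded.
 */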
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
	char name[MAX_KWORKNAME] = { 0 };
	int fd = bpf_map__fd(skel->maps.perf_kwork_names);

	*ret_name = NULL;

	if (fd < 0) {
		pr_debug("Invalid names map fd\n");
		return 0;
	}

	if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
		*ret_name = strdup(name);
		if (*ret_name == NULL) {
			pr_err("Failed to copy work name\n");
			return -1;
		}
	}

	return 0;
}

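/*
 * Each class' load_prepare() callback enables only the tracepoint
 * programs needed for the requested report type; every program starts
 * with autoload disabled (see perf_kwork__trace_prepare_bpf()).
 */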
static void irq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
		bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
	}
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare  = irq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void softirq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
		bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
		bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
	}
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare  = softirq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void workqueue_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
		bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
	}
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
	.load_prepare  = workqueue_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}

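/*
 * Translate the user-supplied CPU list and name filter into the BPF
 * filter maps and raise the corresponding has_*_filter flags so the
 * BPF programs start filtering.
 */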
static int setup_filters(struct perf_kwork *kwork)
{
	u8 val = 1;
	int i, nr_cpus, key, fd;
	struct perf_cpu_map *map;

	if (kwork->cpu_list != NULL) {
		fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (map == NULL) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		for (i = 0; i < perf_cpu_map__nr(map); i++) {
			struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);

		skel->bss->has_cpu_filter = 1;
	}

	if (kwork->profile_name != NULL) {
		if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
			pr_err("Requested name filter %s too large, limit to %d\n",
			       kwork->profile_name, MAX_KWORKNAME - 1);
			return -1;
		}

		fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
		if (fd < 0) {
			pr_debug("Invalid name filter fd\n");
			return -1;
		}

		key = 0;
		bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);

		skel->bss->has_name_filter = 1;
	}

	return 0;
}

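/*
 * Open the skeleton, enable only the programs needed for the
 * configured classes and report type, load it, apply the filters and
 * attach.
 *
 * A rough sketch of the expected call sequence from the perf kwork
 * command (builtin-kwork.c, assuming BPF-based recording was
 * requested):
 *
 *	perf_kwork__trace_prepare_bpf(kwork);
 *	perf_kwork__trace_start();
 *	... workload runs ...
 *	perf_kwork__trace_finish();
 *	perf_kwork__report_read_bpf(kwork);
 *	perf_kwork__report_cleanup_bpf();
 */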
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_trace_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork trace skeleton\n");
		return -1;
	}

	/*
	 * Set all programs to non-autoload, then let each class'
	 * load_prepare() enable only those required by the configured
	 * report type.
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    (kwork_class_bpf_supported_list[type] == NULL)) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;

		if (class_bpf->load_prepare != NULL)
			class_bpf->load_prepare(kwork);
	}

	if (kwork_trace_bpf__load(skel)) {
		pr_debug("Failed to load kwork trace skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_trace_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork trace skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_trace_bpf__destroy(skel);
	return -1;
}

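/*
 * Convert one entry of the BPF report map into a kwork_work and fill
 * either the runtime or the latency statistics, depending on the
 * requested report type.
 */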
static int add_work(struct perf_kwork *kwork,
		    struct work_key *key,
		    struct report_data *data)
{
	struct kwork_work *work;
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work tmp = {
		.id = key->id,
		.name = NULL,
		.cpu = key->cpu,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	if ((bpf_trace->get_work_name != NULL) &&
	    (bpf_trace->get_work_name(key, &tmp.name)))
		return -1;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (work == NULL)
		return -1;

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		work->nr_atoms = data->nr;
		work->total_runtime = data->total_time;
		work->max_runtime = data->max_time;
		work->max_runtime_start = data->max_time_start;
		work->max_runtime_end = data->max_time_end;
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		work->nr_atoms = data->nr;
		work->total_latency = data->total_time;
		work->max_latency = data->max_time;
		work->max_latency_start = data->max_time_start;
		work->max_latency_end = data->max_time_end;
	} else {
		pr_debug("Invalid bpf report type %d\n", kwork->report);
		return -1;
	}

	kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
	kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

	return 0;
}

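/*
 * Walk the whole BPF report map with bpf_map_get_next_key() and add
 * every entry that recorded at least one event to the kwork session.
 */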
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
	struct report_data data;
	struct work_key key = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	struct work_key prev = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	int fd = bpf_map__fd(skel->maps.perf_kwork_report);

	if (fd < 0) {
		pr_debug("Invalid report fd\n");
		return -1;
	}

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
			pr_debug("Failed to lookup report elem\n");
			return -1;
		}

		if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
			return -1;

		prev = key;
	}
	return 0;
}

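/*
 * Tear down the skeleton and release all BPF maps and programs once
 * the report has been read.
 */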
void perf_kwork__report_cleanup_bpf(void)
{
	kwork_trace_bpf__destroy(skel);
}