// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;

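/*
 * Open the BPF skeleton, size its maps according to the requested
 * aggregation mode and filters, resolve symbol filters to kernel
 * addresses, then load and attach the programs.
 */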
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

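	/*
	 * Size each filter map for the number of entries it will hold;
	 * unused filter maps keep a single dummy entry since a BPF hash
	 * map cannot be created with zero max_entries.
	 */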
	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_addrs)
		naddrs = con->filters->nr_addrs;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = kmap->unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		/* keep the dummy entry if no symbol was resolved */
		if (con->filters->nr_addrs)
			naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

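	/*
	 * The maps exist only after a successful load; populate the
	 * filter maps and set the flags that the BPF programs check
	 * before recording an event.
	 */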
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

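	/* a workload forked by perf is not in the target; filter on its pid */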
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (lock_contention_bpf__attach(skel) < 0) {
		pr_err("Failed to attach lock-contention BPF skeleton\n");
		return -1;
	}

	return 0;
}

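/* collection is gated by the 'enabled' flag shared with the BPF programs */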
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

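/*
 * Return a display name for a contention entry depending on the
 * aggregation mode: the task comm, the lock's symbol name, or the
 * first caller outside of the locking internals.
 */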
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
		if (sym)
			name = sym->name;
		return name;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = kmap->map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

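/*
 * Drain the BPF lock_stat map into the userspace lock_stat list,
 * merging counts into an existing entry when one is found for the key.
 */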
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key;
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->lost = skel->bss->lost;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

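	/* pre-create the idle thread (pid 0) so it's shown as "swapper" */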
	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

	/* make sure it loads the kernel map */
	map__load(maps__first(machine->kmaps));

	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace))
				goto next;
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
			ls_key = key.lock_addr;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->save_callstack) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

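/* disable collection and release all BPF programs and maps */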
int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}