xref: /openbmc/linux/tools/perf/util/thread.c (revision 9350a917)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

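/*
 * A thread that is not a group leader shares the maps of its group leader,
 * so look the leader up (creating it if needed) and take a reference on its
 * maps; a leader, or a thread whose pid is unknown, gets a fresh maps object.
 */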
int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

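/*
 * Allocate and initialize a thread: pid/tid come from the caller, everything
 * else starts out unknown (-1), and a placeholder ":<tid>" comm stands in
 * until a real comm event is seen. The caller owns the initial reference.
 */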
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

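/*
 * Final teardown, reached when the last reference is dropped: release the
 * maps, drain the namespaces and comm lists under their write locks, drop
 * the nsinfo reference and free the LBR stitch state before freeing the
 * thread itself.
 */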
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);
	RC_CHK_FREE(thread);
}

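/* Take a new reference on the thread, returning it for convenient chaining. */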
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

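/*
 * Drop a reference, deleting the thread when it was the last one. NULL is
 * tolerated.
 */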
void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

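/*
 * Record the namespaces from a PERF_RECORD_NAMESPACES event. New entries
 * are added at the head of the list, so the head is always the current set.
 */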
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed some or all of the
		 * namespaces of this thread. Update the end time of the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

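/*
 * Return the most recent comm that was set by an exec; when none is marked
 * exec, fall back to the oldest comm, subject to the fixup below.
 */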
struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

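/*
 * The first real comm overrides the default ":<tid>" placeholder in place;
 * later comms are prepended so the list head stays current. An exec also
 * flushes the unwinder's cached state for this thread's maps.
 */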
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

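/* Read the comm for this thread from /proc/<pid>/task/<tid>/comm. */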
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

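/*
 * The comm length is computed lazily and cached; it reflects only the
 * current comm, per the CHECKME below.
 */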
static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it should probably return the max comm len from its comm list instead */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

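/*
 * Insert a new map, first preparing it for unwinding and trimming any
 * existing maps it overlaps.
 */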
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	maps__fixup_overlappings(thread__maps(thread), map, stderr);
	return maps__insert(thread__maps(thread), map);
}

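/*
 * Set up DWARF unwind access for the thread's existing maps; stop at the
 * first error or once a map reports the unwinder fully initialized.
 */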
static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = thread__maps(thread);
	struct map_rb_node *rb_node;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, rb_node) {
		err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(maps__lock(maps));

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

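/*
 * A new thread of the same process shares its maps; a new process gets a
 * copy of the parent's maps (unless the caller opted out of cloning).
 */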
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread, so share the map groups of the process. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (thread__maps(thread) == thread__maps(parent)) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is a new process, so copy the maps. */
	return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}

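/*
 * Handle a fork event: inherit the parent's current comm (stamped with the
 * fork time), record the parent's tid as ppid and clone or share the maps.
 */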
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

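/*
 * Resolve an address whose cpumode is unknown by trying each cpumode in
 * turn until one of them yields a map.
 */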
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

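/*
 * Return a reference to the thread group leader, or NULL when the pid is
 * unknown or the leader cannot be found.
 */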
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

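/*
 * Read len bytes at virtual address ip into buf, going through the DSO
 * backing the map that contains ip. Optionally reports whether that DSO
 * is 64-bit.
 */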
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso->is_64_bit;

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

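/*
 * Free the LBR stitch state: the in-use lists, the free lists and the saved
 * LBR cursor entries.
 */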
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}
485