xref: /openbmc/linux/tools/perf/util/thread.c (revision 5214cae7)
1 #include "../perf.h"
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <string.h>
5 #include "session.h"
6 #include "thread.h"
7 #include "thread-stack.h"
8 #include "util.h"
9 #include "debug.h"
10 #include "comm.h"
11 #include "unwind.h"
12 
13 int thread__init_map_groups(struct thread *thread, struct machine *machine)
14 {
15 	struct thread *leader;
16 	pid_t pid = thread->pid_;
17 
18 	if (pid == thread->tid || pid == -1) {
19 		thread->mg = map_groups__new(machine);
20 	} else {
21 		leader = __machine__findnew_thread(machine, pid, pid);
22 		if (leader) {
23 			thread->mg = map_groups__get(leader->mg);
24 			thread__put(leader);
25 		}
26 	}
27 
28 	return thread->mg ? 0 : -1;
29 }
30 
31 struct thread *thread__new(pid_t pid, pid_t tid)
32 {
33 	char *comm_str;
34 	struct comm *comm;
35 	struct thread *thread = zalloc(sizeof(*thread));
36 
37 	if (thread != NULL) {
38 		thread->pid_ = pid;
39 		thread->tid = tid;
40 		thread->ppid = -1;
41 		thread->cpu = -1;
42 		INIT_LIST_HEAD(&thread->comm_list);
43 
44 		if (unwind__prepare_access(thread) < 0)
45 			goto err_thread;
46 
47 		comm_str = malloc(32);
48 		if (!comm_str)
49 			goto err_thread;
50 
51 		snprintf(comm_str, 32, ":%d", tid);
52 		comm = comm__new(comm_str, 0, false);
53 		free(comm_str);
54 		if (!comm)
55 			goto err_thread;
56 
57 		list_add(&comm->list, &thread->comm_list);
58 		atomic_set(&thread->refcnt, 1);
59 		RB_CLEAR_NODE(&thread->rb_node);
60 	}
61 
62 	return thread;
63 
64 err_thread:
65 	free(thread);
66 	return NULL;
67 }
68 
/*
 * Destroy @thread and everything it owns.
 *
 * Must only run once the last reference is dropped (see thread__put());
 * the BUG_ON() asserts the thread was already unlinked from its
 * machine's rb tree.
 */
void thread__delete(struct thread *thread)
{
	struct comm *comm, *tmp;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		/* Drop our reference on the (possibly shared) map groups. */
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	/* Release every comm entry accumulated over the thread's lifetime. */
	list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	unwind__finish_access(thread);

	free(thread);
}
89 
90 struct thread *thread__get(struct thread *thread)
91 {
92 	if (thread)
93 		atomic_inc(&thread->refcnt);
94 	return thread;
95 }
96 
97 void thread__put(struct thread *thread)
98 {
99 	if (thread && atomic_dec_and_test(&thread->refcnt)) {
100 		/*
101 		 * Remove it from the dead_threads list, as last reference
102 		 * is gone.
103 		 */
104 		list_del_init(&thread->node);
105 		thread__delete(thread);
106 	}
107 }
108 
109 struct comm *thread__comm(const struct thread *thread)
110 {
111 	if (list_empty(&thread->comm_list))
112 		return NULL;
113 
114 	return list_first_entry(&thread->comm_list, struct comm, list);
115 }
116 
117 struct comm *thread__exec_comm(const struct thread *thread)
118 {
119 	struct comm *comm, *last = NULL;
120 
121 	list_for_each_entry(comm, &thread->comm_list, list) {
122 		if (comm->exec)
123 			return comm;
124 		last = comm;
125 	}
126 
127 	return last;
128 }
129 
/*
 * Record a new comm @str for @thread at time @timestamp.
 *
 * The first call overrides the placeholder ":<tid>" comm installed by
 * thread__new() in place; later calls prepend a fresh entry so the full
 * comm history is preserved.  When @exec is set the comm change came
 * from an exec, so any cached unwind access state is flushed.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	struct comm *new, *curr = thread__comm(thread);
	int err;

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}
155 
156 const char *thread__comm_str(const struct thread *thread)
157 {
158 	const struct comm *comm = thread__comm(thread);
159 
160 	if (!comm)
161 		return NULL;
162 
163 	return comm__str(comm);
164 }
165 
166 /* CHECKME: it should probably better return the max comm len from its comm list */
167 int thread__comm_len(struct thread *thread)
168 {
169 	if (!thread->comm_len) {
170 		const char *comm = thread__comm_str(thread);
171 		if (!comm)
172 			return 0;
173 		thread->comm_len = strlen(comm);
174 	}
175 
176 	return thread->comm_len;
177 }
178 
179 size_t thread__fprintf(struct thread *thread, FILE *fp)
180 {
181 	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
182 	       map_groups__fprintf(thread->mg, fp);
183 }
184 
/*
 * Insert @map into @thread's map groups, first trimming/splitting any
 * existing maps it overlaps (overlap details are reported on stderr).
 */
void thread__insert_map(struct thread *thread, struct map *map)
{
	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);
}
190 
/*
 * Give @thread a copy of @parent's maps on fork.
 *
 * A new thread inside the same process already shares the process map
 * groups (set up by thread__init_map_groups()), so there is nothing to
 * do; sharing between different processes indicates broken state and is
 * only reported.  Otherwise every map type is cloned from the parent.
 *
 * Returns 0 on success, -ENOMEM if cloning fails.
 */
static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent)
{
	int i;

	/* This is new thread, we share map groups for process. */
	if (thread->pid_ == parent->pid_)
		return 0;

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}

	/* But this one is new process, copy maps. */
	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(thread->mg, parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}
213 
/*
 * Initialize @thread as a child forked from @parent at @timestamp:
 * inherit the parent's current comm (when one was set), record the
 * parent tid in ->ppid and clone/share the parent's maps.
 *
 * Returns 0 on success or a negative errno.
 */
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
	int err;

	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent);
}
230 
231 void thread__find_cpumode_addr_location(struct thread *thread,
232 					enum map_type type, u64 addr,
233 					struct addr_location *al)
234 {
235 	size_t i;
236 	const u8 const cpumodes[] = {
237 		PERF_RECORD_MISC_USER,
238 		PERF_RECORD_MISC_KERNEL,
239 		PERF_RECORD_MISC_GUEST_USER,
240 		PERF_RECORD_MISC_GUEST_KERNEL
241 	};
242 
243 	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
244 		thread__find_addr_location(thread, cpumodes[i], type, addr, al);
245 		if (al->map)
246 			break;
247 	}
248 }
249