// SPDX-License-Identifier: GPL-2.0
#include "../perf.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "unwind.h"

#include <api/fs/fs.h>

/*
 * A thread group leader (pid == tid), or a thread whose pid is unknown,
 * gets its own map groups; any other thread shares its leader's.
 */
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del(&namespaces->list);
		namespaces__free(namespaces);
	}
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	unwind__finish_access(thread);
	nsinfo__zput(thread->nsinfo);

	free(thread);
}

struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead_threads list, as the last
		 * reference is gone.
		 */
		list_del_init(&thread->node);
		thread__delete(thread);
	}
}

struct namespaces *thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct namespaces_event *event)
{
	struct namespaces *new, *curr = thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed a few or all of this
		 * thread's namespaces. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

/*
 * Return the newest comm that was set by an exec, or the oldest comm on the
 * list if none of them is marked exec.
 */
struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		last = comm;
	}

	return last;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	/* The path is relative to the procfs mount point, cf. procfs__read_str(). */
	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

const char *thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

/* CHECKME: it should probably return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int i, err = 0;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct maps *maps = &thread->mg->maps[i];
		struct map *map;

		pthread_rwlock_rdlock(&maps->lock);

		for (map = maps__first(maps); map; map = map__next(map)) {
			err = unwind__prepare_access(thread, map, &initialized);
			if (err || initialized)
				break;
		}

		pthread_rwlock_unlock(&maps->lock);
	}

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (symbol_conf.use_callchain)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent)
{
	int i;

	/* This is a new thread in the same process; map groups are shared. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}

	/* But this one is a new process, so copy the maps. */
	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(thread, parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent);
}

void thread__find_cpumode_addr_location(struct thread *thread,
					enum map_type type, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_addr_location(thread, cpumodes[i], type, addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}
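
/*
 * Illustrative usage sketch, not part of the original file: how a caller is
 * expected to pair the reference-counting helpers above. It assumes
 * machine__findnew_thread() (declared in machine.h) returns a thread with a
 * reference already taken, which the caller releases with thread__put().
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static void thread__usage_example(struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	const char *comm;

	if (thread == NULL)
		return;

	/* Use the thread while the reference is held, e.g. print its comm. */
	comm = thread__comm_str(thread);
	if (comm)
		pr_debug("thread %d/%d comm: %s\n", thread->pid_, thread->tid, comm);

	/* Drop the reference; thread__delete() runs once the last ref is gone. */
	thread__put(thread);
}
#endif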