// SPDX-License-Identifier: GPL-2.0
#include "../perf.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "symbol.h"
#include "unwind.h"

#include <api/fs/fs.h>

int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
		srccode_state_init(&thread->srccode_state);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	unwind__finish_access(thread);
	nsinfo__zput(thread->nsinfo);
	srccode_state_free(&thread->srccode_state);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);
	free(thread);
}

struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead_threads list, as last reference
		 * is gone.
		 */
		list_del_init(&thread->node);
		thread__delete(thread);
	}
}

struct namespaces *thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct namespaces_event *event)
{
	struct namespaces *new, *curr = thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * The setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct namespaces_event *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);
	return ret;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		last = comm;
	}

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);
	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(const struct thread *thread)
{
	const char *str;

	down_read((struct rw_semaphore *)&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read((struct rw_semaphore *)&thread->comm_lock);

	return str;
}
/* CHECKME: it would probably be better to return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = &thread->mg->maps;
	struct map *map;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		err = unwind__prepare_access(thread, map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(&maps->lock);

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (symbol_conf.use_callchain)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent,
				    bool do_maps_clone)
{
	/* This is a new thread, so share the map groups of the process. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}
	/* But this one is a new process, so copy the maps. */
	return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}
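
#if 0	/* Illustrative sketch, not part of the original file. */
/*
 * A minimal usage sketch of the reference-counting contract implemented by
 * thread__get()/thread__put() above: a lookup such as machine__find_thread()
 * is assumed to hand back a reference owned by the caller, who keeps the
 * thread alive while using it and then drops that reference; the final
 * thread__put() frees the object via thread__delete().  The helper name
 * below is hypothetical and exists only to demonstrate the contract.
 */
static void example__print_thread_comm(struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__find_thread(machine, pid, tid);

	if (thread == NULL)
		return;

	/* Safe to use the thread while we hold our reference. */
	pr_debug("thread %d/%d comm: %s\n", pid, tid, thread__comm_str(thread));

	/* Drop our reference; the thread is freed once the last one is gone. */
	thread__put(thread);
}
#endif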