// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		struct comm *comm;
		char comm_str[32];

		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		snprintf(comm_str, sizeof(comm_str), ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	thread__delete(thread);
	return NULL;
}

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}
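/*
 * Reference counting: thread__get() takes an extra reference (and, when
 * reference-count checking is built in, a new RC_CHK wrapper), while
 * thread__put() drops one and calls thread__delete() once the count reaches
 * zero. A rough usage sketch for code that stashes a struct thread pointer:
 *
 *	struct thread *t = thread__get(thread);
 *	...
 *	thread__put(t);
 */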
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * setns syscall must have changed few or all the namespaces
		 * of this thread. Update end time for the namespaces
		 * previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}
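/*
 * comm handling: new comms are list_add()ed at the head of the comm list, so
 * thread__comm() returns the most recently set comm (or NULL if none).
 * thread__exec_comm() prefers the newest comm that was set by an exec and
 * otherwise falls back to the oldest entry, with a heuristic for main threads
 * synthesized from fork events.
 */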
struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it should probably better return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}
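/*
 * Map insertion: before a map joins the thread's address space,
 * unwind__prepare_access() lets the DWARF unwinder set up per-maps state and
 * maps__fixup_overlappings() trims or removes existing maps that overlap the
 * new range, so the new map takes precedence.
 */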
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	maps__fixup_overlappings(thread__maps(thread), map, stderr);
	return maps__insert(thread__maps(thread), map);
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = thread__maps(thread);
	struct map_rb_node *rb_node;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, rb_node) {
		err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(maps__lock(maps));

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is new thread, we share map groups for process. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (thread__maps(thread) == thread__maps(parent)) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is new process, copy maps. */
	return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}
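/*
 * Fork handling: the child inherits the parent's comm (when one was set),
 * records the parent's tid as its ppid, and then either keeps the maps it
 * already shares with the parent (a new thread in an existing process) or
 * gets a copy of the parent's maps via maps__clone() (a new process).
 */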
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso->is_64_bit;

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		map_symbol__exit(&pos->cursor.ms);
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	for (unsigned int i = 0; i < lbr_stitch->prev_lbr_cursor_size; i++)
		map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}