// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "string2.h"
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"
#include "event.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
}

static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)

struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		refcount_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}

	return threads;
}

static struct thread_map *__thread_map__new_all_cpus(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent *dirent, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	refcount_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);

		if (uid != UINT_MAX) {
			struct stat st;

			if (stat(path, &st) != 0 || st.st_uid != uid)
				continue;
		}

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

struct thread_map *thread_map__new_all_cpus(void)
{
	return __thread_map__new_all_cpus(UINT_MAX);
}

struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	return __thread_map__new_all_cpus(uid);
}

struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}
	return threads;
}

struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	strlist__delete(slist);
	goto out;
}

struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid, bool per_thread)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	if (per_thread)
		return thread_map__new_all_cpus();

	return thread_map__new_by_tid_str(tid);
}

static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void thread_map__put(struct thread_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}

size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}

static void thread_map__copy_event(struct thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	refcount_set(&threads->refcnt, 1);
}

struct thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}

bool thread_map__has(struct thread_map *threads, pid_t pid)
{
	int i;

	for (i = 0; i < threads->nr; ++i) {
		if (threads->map[i].pid == pid)
			return true;
	}

	return false;
}

int thread_map__remove(struct thread_map *threads, int idx)
{
	int i;

	if (threads->nr < 1)
		return -EINVAL;

	if (idx >= threads->nr)
		return -EINVAL;

	/*
	 * Free the 'idx' item and shift the rest up.
	 */
	free(threads->map[idx].comm);

	for (i = idx; i < threads->nr - 1; i++)
		threads->map[i] = threads->map[i + 1];

	threads->nr--;
	return 0;
}