xref: /openbmc/linux/tools/perf/util/machine.c (revision 33ac9dba)
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL) {
			/* Don't leak the root_dir copy on the error path. */
			zfree(&machine->root_dir);
			return -ENOMEM;
		}

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}
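
/*
 * A minimal lifecycle sketch (hypothetical caller, not part of this file):
 * an embedded struct machine pairs machine__init() with machine__exit(),
 * while heap instances use machine__new_host()/machine__delete() below:
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) < 0)
 *		return -1;
 *	...
 *	machine__exit(&host);
 */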

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		if (machine__init(machine, "", HOST_KERNEL_ID) < 0)
			goto out_delete;

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/*
		 * Fetch the next node before erasing this one: calling
		 * rb_next() on an already erased node is not safe.
		 */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &machine->threads);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
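
/*
 * Note on the fallback above: DEFAULT_GUEST_KERNEL_ID is 0, so a guest
 * machine registered with pid 0 acts as the default returned when no
 * guest with an exactly matching pid exists in the tree.
 */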

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		snprintf(path, sizeof(path), "%s/%d",
			 symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
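
/*
 * For reference, symbol_conf.guestmount names a directory where each
 * guest's filesystem is mounted under its pid, so pid 1234 is expected at
 * "<guestmount>/1234" with e.g. "<guestmount>/1234/proc/kallsyms" inside;
 * that subtree becomes the new guest machine's root_dir.
 */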

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new();

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a
		 * thread with an unknown pid. Just print an error if there
		 * are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately after the rb
		 * tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader, and
		 * that would corrupt the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			thread__delete(th);
			return NULL;
		}
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}
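
/*
 * A usage sketch (hypothetical caller): machine__find_thread() only walks
 * the existing tree, while machine__findnew_thread() creates the thread on
 * a miss, so lookups that must not fabricate threads use the former:
 *
 *	struct thread *t = machine__find_thread(machine, pid, tid);
 *
 *	if (t == NULL)
 *		return;	(no such tid was seen in the event stream)
 */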

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_kernel_start_addr(struct machine *machine,
					  const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}
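
/*
 * For reference, /proc/kallsyms lines look like
 * "ffffffff81000000 T _text"; kallsyms__get_function_start() returns the
 * address column for the given symbol name, so the loop above prefers
 * "_text" and falls back to "_stext".
 */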

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			/* Clear errno so a stale ERANGE can't leak in. */
			errno = 0;
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			snprintf(path, sizeof(path), "%s/%s/proc/kallsyms",
				 symbol_conf.guestmount,
				 namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		/* scandir() allocates each entry as well as the array. */
		for (i = 0; i < items; i++)
			free(namelist[i]);
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	snprintf(version, sizeof(version), "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
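
/*
 * Example: given a /proc/version line such as
 * "Linux version 3.16.0-rc5 (user@host) (gcc version 4.9.0 ...) ...",
 * get_kernel_version() returns a strdup()ed "3.16.0-rc5", which
 * machine__set_modules_path() below expands to
 * "<root_dir>/lib/modules/3.16.0-rc5".
 */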

static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char dso_name[PATH_MAX];
			char *dot = strrchr(dent->d_name, '.');
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}
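
/*
 * Example of the transform above: a directory entry "snd-pcm.ko" yields
 * the short DSO name "[snd_pcm]" ('-' folded to '_' by strxfrchar() to
 * match /proc/modules naming), while the full path becomes the DSO's long
 * name used later for symbol loading.
 */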

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_kernel_start_addr(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
945 
946 static void machine__set_kernel_mmap_len(struct machine *machine,
947 					 union perf_event *event)
948 {
949 	int i;
950 
951 	for (i = 0; i < MAP__NR_TYPES; i++) {
952 		machine->vmlinux_maps[i]->start = event->mmap.start;
953 		machine->vmlinux_maps[i]->end   = (event->mmap.start +
954 						   event->mmap.len);
955 		/*
956 		 * Be a bit paranoid here, some perf.data file came with
957 		 * a zero sized synthesized MMAP event for the kernel.
958 		 */
959 		if (machine->vmlinux_maps[i]->end == 0)
960 			machine->vmlinux_maps[i]->end = ~0ULL;
961 	}
962 }

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
					"[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			/* Bounded copy instead of strcpy(): the event
			 * filename can be longer than this buffer. */
			snprintf(short_module_name, sizeof(short_module_name),
				 "%s", event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non-zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * Preload the dso of the guest kernel and modules.
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
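
/*
 * For reference, the kernel-space MMAP filenames handled above come in
 * three forms: an absolute module path such as "/lib/modules/.../ext4.ko",
 * a bracketed module name such as "[ext4]", or the kernel map prefix
 * itself, e.g. "[kernel.kallsyms]_text", whose suffix names the ref reloc
 * symbol.
 */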

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.pid, event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, 0, 0, 0, 0, 0, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int j;
	int err;
	int skip_idx __maybe_unused;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(machine, thread, chain);

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
			  symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}
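
/*
 * A configuration sketch: the DWARF post-unwind path above only runs when
 * samples carry both user registers and a user stack dump, i.e. when the
 * data was recorded with something like:
 *
 *	perf record --call-graph dwarf -- <workload>
 */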

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}