xref: /openbmc/linux/tools/perf/util/thread.c (revision 4800cd83)
1 #include "../perf.h"
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <string.h>
5 #include "session.h"
6 #include "thread.h"
7 #include "util.h"
8 #include "debug.h"
9 
10 /* Skip "." and ".." directories */
/*
 * scandir() filter: keep only real task entries, rejecting "." and ".."
 * (and, as a side effect, any other dot-prefixed name).
 */
static int filter(const struct dirent *dir)
{
	return dir->d_name[0] != '.';
}
18 
19 struct thread_map *thread_map__new_by_pid(pid_t pid)
20 {
21 	struct thread_map *threads;
22 	char name[256];
23 	int items;
24 	struct dirent **namelist = NULL;
25 	int i;
26 
27 	sprintf(name, "/proc/%d/task", pid);
28 	items = scandir(name, &namelist, filter, NULL);
29 	if (items <= 0)
30                 return NULL;
31 
32 	threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
33 	if (threads != NULL) {
34 		for (i = 0; i < items; i++)
35 			threads->map[i] = atoi(namelist[i]->d_name);
36 		threads->nr = items;
37 	}
38 
39 	for (i=0; i<items; i++)
40 		free(namelist[i]);
41 	free(namelist);
42 
43 	return threads;
44 }
45 
46 struct thread_map *thread_map__new_by_tid(pid_t tid)
47 {
48 	struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
49 
50 	if (threads != NULL) {
51 		threads->map[0] = tid;
52 		threads->nr	= 1;
53 	}
54 
55 	return threads;
56 }
57 
/*
 * Convenience constructor: map all tasks of @pid when a pid is given,
 * otherwise map the single @tid.
 */
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
{
	return pid == -1 ? thread_map__new_by_tid(tid)
			 : thread_map__new_by_pid(pid);
}
64 
65 static struct thread *thread__new(pid_t pid)
66 {
67 	struct thread *self = zalloc(sizeof(*self));
68 
69 	if (self != NULL) {
70 		map_groups__init(&self->mg);
71 		self->pid = pid;
72 		self->comm = malloc(32);
73 		if (self->comm)
74 			snprintf(self->comm, 32, ":%d", self->pid);
75 	}
76 
77 	return self;
78 }
79 
/*
 * Release everything a thread owns: its map groups, its comm string,
 * and finally the thread itself.  The order matters: the thread memory
 * must go last since the earlier calls dereference it.
 */
void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}
86 
87 int thread__set_comm(struct thread *self, const char *comm)
88 {
89 	int err;
90 
91 	if (self->comm)
92 		free(self->comm);
93 	self->comm = strdup(comm);
94 	err = self->comm == NULL ? -ENOMEM : 0;
95 	if (!err) {
96 		self->comm_set = true;
97 		map_groups__flush(&self->mg);
98 	}
99 	return err;
100 }
101 
102 int thread__comm_len(struct thread *self)
103 {
104 	if (!self->comm_len) {
105 		if (!self->comm)
106 			return 0;
107 		self->comm_len = strlen(self->comm);
108 	}
109 
110 	return self->comm_len;
111 }
112 
113 static size_t thread__fprintf(struct thread *self, FILE *fp)
114 {
115 	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
116 	       map_groups__fprintf(&self->mg, verbose, fp);
117 }
118 
/*
 * Find the thread with @pid in the session's rbtree, creating and
 * inserting a new one if it does not exist yet.  Returns NULL only if
 * a needed allocation fails.
 */
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	/* Standard rbtree descent; parent/p track the insertion point. */
	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* Not found: allocate, link at the spot the descent found, recolor. */
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}
157 
/*
 * Add @map to the thread's map groups.  Overlapping existing maps are
 * fixed up first (split/truncated, warnings to stderr under verbose),
 * so the fixup must precede the insert.
 */
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}
163 
164 int thread__fork(struct thread *self, struct thread *parent)
165 {
166 	int i;
167 
168 	if (parent->comm_set) {
169 		if (self->comm)
170 			free(self->comm);
171 		self->comm = strdup(parent->comm);
172 		if (!self->comm)
173 			return -ENOMEM;
174 		self->comm_set = true;
175 	}
176 
177 	for (i = 0; i < MAP__NR_TYPES; ++i)
178 		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
179 			return -ENOMEM;
180 	return 0;
181 }
182 
183 size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
184 {
185 	size_t ret = 0;
186 	struct rb_node *nd;
187 
188 	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
189 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
190 
191 		ret += thread__fprintf(pos, fp);
192 	}
193 
194 	return ret;
195 }
196