// xref: /openbmc/linux/tools/lib/perf/evsel.c (revision 7fde9d6e)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

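/* Initialize an already allocated evsel: empty list node, attr copied by value. */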
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
}

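/* Allocate a zeroed evsel and initialize it with the given attributes. */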
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr);

	return evsel;
}

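/* Free the evsel itself; any open fds or mmaps must be released beforehand. */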
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

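/*
 * Accessors into the per-event xyarrays, indexed by (cpu index, thread
 * index).  FD() yields an lvalue so it can be assigned to; MMAP() returns
 * NULL when no mmap array has been allocated.
 */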
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)

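/* Allocate the fd array and mark every (cpu, thread) slot as not opened. */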
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

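/* Allocate one struct perf_mmap per (cpu, thread) counter. */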
static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

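/* Thin wrapper for the perf_event_open(2) syscall, which glibc does not wrap. */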
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

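/*
 * Open one counter fd for every (cpu, thread) pair.  NULL maps fall back
 * to static dummy maps, giving "any cpu"/"this thread" semantics.  On
 * failure, any fds opened so far are closed again.
 */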
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], -1, 0);

			if (fd < 0) {
				err = -errno;
				goto out;
			}

			FD(evsel, cpu, thread) = fd;
		}
	}

out:
	/* Do not leak fds that were opened before the failure. */
	if (err)
		perf_evsel__close_fd(evsel);

	return err;
}

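/* Close every opened fd on one cpu index and reset the slots to -1. */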
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		if (FD(evsel, cpu, thread) >= 0)
			close(FD(evsel, cpu, thread));
		FD(evsel, cpu, thread) = -1;
	}
}

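/* Close every opened fd across all cpu indices. */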
void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}

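/* Release the fd array itself; the fds must already be closed. */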
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

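/* Close all fds and free the fd array; a no-op if nothing was opened. */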
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

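/* Close the fds of a single cpu index, keeping the rest of the evsel open. */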
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}

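/* Unmap every per-counter ring buffer and free the mmap array. */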
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int cpu, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__munmap(map);
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

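/*
 * Map a ring buffer of 'pages' data pages (plus the header page) for every
 * opened counter.  'pages' must be a power of two, since the buffer size is
 * used as a position mask.  On any failure, all mappings made so far are
 * undone.
 */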
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, cpu, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

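/* Return the mmap base address of one counter, or NULL if it is not mapped. */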
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
	if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu, thread)->base;
}

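/*
 * Size in bytes of one read(2) record for this event, derived from
 * attr.read_format; with PERF_FORMAT_GROUP the record carries one entry
 * per group member plus the leading nr field.
 */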
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

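/*
 * Read one counter.  Prefer the self-monitoring mmap path (values taken
 * from the event's user page) and fall back to read(2) on the fd when no
 * mapping is available.
 */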
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

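/* Issue an ioctl against every opened fd on one cpu index. */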
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err;
		int fd = FD(evsel, cpu, thread);

		/* Skip slots that were never opened or already closed. */
		if (fd < 0)
			continue;

		err = ioctl(fd, ioc, arg);
		if (err)
			return err;
	}

	return 0;
}

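/*
 * Start and stop counting via PERF_EVENT_IOC_ENABLE/DISABLE, either for a
 * single cpu index or across all of them.
 */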
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

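/*
 * Attach a tracepoint filter string to every fd of the event.  Note that
 * this iterates the evsel's own cpu map, which must have been set up.
 */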
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}

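/* Simple accessors for the evsel's cpu map, thread map and attributes. */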
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

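/*
 * Allocate the per-counter sample_id bookkeeping and the flat id array.
 * System-wide events keep a single thread dimension.
 */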
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

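/* Free the sample_id bookkeeping allocated above. */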
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
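
/*
 * Usage sketch (illustrative, not part of the original file): counting one
 * software event for the current thread with this API, loosely following
 * the libperf counting example.  Error handling is omitted for brevity.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// pid 0 == this thread
 *	perf_evsel__open(evsel, NULL, threads);		// NULL cpus == any cpu
 *	perf_evsel__enable(evsel);
 *	// ... workload under measurement ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);		// counts.val holds the count
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */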
382