// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/bug.h>

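/*
 * Initialize a caller-provided evsel from an event attribute and its index
 * in the owning event list.  A freshly initialized event is its own group
 * leader.
 */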
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx  = idx;
	evsel->leader = evsel;
}

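/*
 * Allocate a zeroed evsel for 'attr' and initialize it (with index 0).
 * Returns NULL on allocation failure; release with perf_evsel__delete().
 */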
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

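/*
 * FD() and MMAP() return the per-(cpu, thread) file descriptor slot and
 * struct perf_mmap entry from the evsel's xyarrays; MMAP() yields NULL
 * when no mmap array has been allocated yet.
 */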
#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)

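/*
 * Allocate the ncpus x nthreads array of file descriptors and mark every
 * slot as not yet opened (-1).
 */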
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, cpu, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

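/* Allocate a matching ncpus x nthreads array of struct perf_mmap. */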
static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

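/*
 * Thin wrapper around the perf_event_open(2) syscall, which has no
 * C library wrapper.
 */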
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

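/*
 * Resolve the group fd to pass to perf_event_open(): -1 when 'evsel' is
 * itself the group leader, otherwise the leader's already-open fd for the
 * same (cpu, thread) slot.
 */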
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * The leader must already have been processed (opened);
	 * if it has not been, that is a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}

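/*
 * Open the event on every (cpu, thread) pair of the given maps.  A NULL
 * cpu map falls back to a shared dummy map (a single -1 entry, i.e. "any
 * CPU"), and a NULL thread map to a dummy map with pid -1.
 *
 * A minimal counting sketch (error handling omitted; it measures the
 * calling process on any CPU):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);   // pid 0: current process
 *	perf_evsel__open(evsel, NULL, threads);    // NULL cpus: dummy map
 *	perf_evsel__enable(evsel);
 *	// ... workload ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */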
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, cpu, thread);
			if (evsel_fd == NULL)
				return -EINVAL;

			err = get_group_fd(evsel, cpu, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], group_fd, 0);

			if (fd < 0)
				return -errno;

			*evsel_fd = fd;
		}
	}

	return err;
}

static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

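/*
 * Close every open fd and free the fd array.  A no-op if nothing was
 * ever opened.
 */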
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}

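/*
 * Unmap every per-(cpu, thread) mapping of this event and free the mmap
 * array.
 */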
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int cpu, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, cpu, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, cpu, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

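/*
 * Map the event for every open fd so counters can later be read directly
 * from user space (see perf_evsel__read()).  Fails with -EINVAL if nothing
 * is open yet or a mapping already exists; on any mmap error everything
 * mapped so far is torn down again.
 */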
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, cpu, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, cpu, thread);
			struct perf_mmap *map;

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, cpu, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

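/* Return the mmapped base address for (cpu, thread), or NULL if not mapped. */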
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
	int *fd = FD(evsel, cpu, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu, thread)->base;
}

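/*
 * Size in bytes that a read() of this event will fill, derived from
 * attr.read_format.  For example, a non-group event with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID reads
 * 8 (time_enabled) + 8 (value) + 8 (id) = 24 bytes.
 */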
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

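/*
 * Read the counter for one (cpu, thread).  If the event is mmapped, try
 * the userspace self-read from the mmapped page first and fall back to
 * read() on the fd otherwise.
 */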
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}

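/*
 * Apply an ioctl to every open fd of one cpu (all threads), stopping at
 * the first failure.  Used by the enable/disable/filter helpers below.
 */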
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc,  void *arg,
				 int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err;
		int *fd = FD(evsel, cpu, thread);

		if (fd == NULL || *fd < 0)
			return -1;

		err = ioctl(*fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

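/*
 * Set a filter string (PERF_EVENT_IOC_SET_FILTER) on every open fd across
 * the evsel's own cpu map.
 */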
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

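/*
 * Allocate the per-(cpu, thread) sample_id array and the flat id array
 * used to map sample IDs back to this evsel.  System-wide events only
 * track a single thread.
 */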
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}

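/*
 * Scale a counter that was multiplexed off the PMU for part of the time:
 * with 'scale' set and run < ena, val is scaled by ena/run (e.g. val =
 * 1000, ena = 200, run = 100 gives val = 2000) and *pscaled is set to 1;
 * run == 0 zeroes val and sets *pscaled to -1; otherwise *pscaled is 0.
 */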
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, __s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double)count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}
453