// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
}

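/*
 * Propagate the evlist's cpu and thread maps to @evsel: user-specified
 * cpus override the evsel's own (PMU sysfs) cpus, and each evsel's
 * resulting cpus are merged into evlist->all_cpus.
 */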
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * Use the evlist's cpus if the evsel has none of its own (via
	 * PMU sysfs) or if the user defined a target cpu list, which
	 * takes precedence; otherwise keep the evsel's own cpus.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}
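
/*
 * Example (sketch): the typical evlist lifecycle, assuming @evsel was
 * created with perf_evsel__new() and @cpus/@threads with
 * perf_cpu_map__new()/perf_thread_map__new_dummy():
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__add(evlist, evsel);
 *
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */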

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is detected here, so no check is needed on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}
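
/*
 * Example (sketch): perf_evlist__next() is the building block of the
 * perf_evlist__for_each_evsel() iterator; open-coded it looks like
 * this, with do_something() being a hypothetical callback:
 *
 *	struct perf_evsel *evsel = NULL;
 *
 *	while ((evsel = perf_evlist__next(evlist, evsel)) != NULL)
 *		do_something(evsel);
 */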

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or other of the maps isn't being
	 * changed, i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}
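
/*
 * Example (sketch): pin the evlist to all online cpus and the calling
 * thread, mirroring the pattern used by the libperf tests:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	perf_thread_map__set_pid(threads, 0, 0);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *
 * with the caller dropping its own map references (perf_cpu_map__put(),
 * perf_thread_map__put()) once it is done with them.
 */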

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

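/*
 * Note: this returns the first evsel's read_format, on the assumption
 * that all events in the evlist were configured with the same
 * read_format.
 */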
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

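/*
 * Hash @id into evlist->heads so that the evsel that generated a given
 * sample id can be looked up later.
 */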
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

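/*
 * Look up the event id for (cpu, thread)'s @fd and add it: prefer the
 * PERF_EVENT_IOC_ID ioctl, falling back to parsing the id out of a
 * read() on kernels that predate the ioctl.
 */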
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

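	/* The optional time fields, when enabled, precede the id. */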
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

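/*
 * Size the pollfd array for one fd per (cpu, thread) pair, or one per
 * cpu only for system-wide evsels.
 */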
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
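
/*
 * Example (sketch): a simple event loop that waits up to 100ms per
 * iteration before draining the ring buffers (done is a hypothetical
 * termination flag; reading the mmaps is sketched after
 * perf_evlist__mmap() below):
 *
 *	while (!done) {
 *		perf_evlist__poll(evlist, 100);
 *		... read the mmaps ...
 *	}
 */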

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each fd redirected to this mmap via
		 * PERF_EVENT_IOC_SET_OUTPUT also does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

static struct perf_mmap *
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

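/*
 * For each evsel's fd on (cpu_idx, thread): the first fd gets a fresh
 * ring buffer mmap'ed for it, subsequent fds are redirected into that
 * ring via PERF_EVENT_IOC_SET_OUTPUT.  Normal and write_backward
 * (overwrite) events go to separate rings.
 */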
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

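/*
 * One ring buffer per cpu, unless the cpu map is the "any cpu" dummy
 * map (e.g. per-thread monitoring), in which case it is one per thread.
 */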
static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;
	const struct perf_thread_map *threads = evlist->threads;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

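	/* +1 for the ring buffer's header (user) page. */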
	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
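
/*
 * Example (sketch): mmap 4 data pages per ring and drain all events,
 * following the pattern used by the libperf tests:
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__mmap(evlist, 4);
 *
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			... process event->header.type ...
 *			perf_mmap__consume(map);
 *		}
 *
 *		perf_mmap__read_done(map);
 *	}
 */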

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap *
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}