// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

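/*
 * A minimal counting-session sketch of the API below (assumed call
 * sequence; evsel creation and error handling omitted):
 *
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_thread_map__set_pid(threads, 0, 0);
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, NULL, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 *	perf_thread_map__put(threads);
 */
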
void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

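/*
 * Propagate the evlist's cpu and thread maps to one evsel: system-wide
 * events get all available cpus and a dummy thread map; otherwise the
 * evsel uses the user requested cpus (or its own PMU cpus) plus the
 * evlist's threads. The union of all evsels' cpus is accumulated in
 * evlist->all_cpus.
 */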
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so keep them
	 * unless a target cpu list is defined.
	 */
	if (evsel->system_wide) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new(NULL);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		   (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

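/*
 * Add @evsel at the tail of @evlist. Once maps have been propagated, a
 * late-added evsel gets its maps propagated immediately so it stays
 * consistent with the rest of the list.
 */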
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

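/*
 * Return the entry after @prev, or the first entry when @prev is NULL;
 * returns NULL when the end of the list is reached. The public
 * perf_evlist__for_each_evsel() iterator is built on this helper.
 */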
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so there is no need to check on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or the other of the maps isn't
	 * being changed, i.e. don't put it.  Note we are assuming the maps
	 * that are being applied are brand new and evlist is taking ownership
	 * of the original reference count of 1.  If that is not the case it
	 * is up to the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

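/*
 * Note: only the first evsel's attr is consulted; this assumes every
 * event in the list was opened with the same read_format.
 */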
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

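/*
 * Map an event fd to its sample id. PERF_EVENT_IOC_ID is preferred; on
 * kernels without it (ENOTTY) the id is recovered from a read() of the
 * counter, whose layout with PERF_FORMAT_ID and without PERF_FORMAT_GROUP
 * is:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }   // if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }   // if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }   // if PERF_FORMAT_ID
 *	}
 */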
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

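/*
 * Pre-size the pollfd array: a system-wide evsel contributes one fd per
 * cpu, any other evsel one fd per cpu and thread.
 */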
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

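/*
 * Map (or redirect) every evsel's fd for one (cpu, thread) slot into the
 * ring buffer at @idx. The first fd seen per buffer becomes *output and
 * owns the mmap; subsequent fds are redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT and just take a reference.
 */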
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

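/*
 * Per-thread mapping: one ring buffer per thread, plus one per remaining
 * cpu for system-wide events. The per-cpu loop starts at 1 because cpu
 * index 0 holds the per-thread "any cpu" (-1) entry, already covered by
 * the per-thread buffers.
 */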
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

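/*
 * Per-cpu mapping: one ring buffer per cpu, shared by all threads on that
 * cpu; the buffer index doubles as the cpu index.
 */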
static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}

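/*
 * Allocate sample ids and the pollfd array as needed, then mmap per thread
 * or per cpu. mp->mask is the data area size minus one: mmap_len includes
 * one extra header page, and the data area is assumed to be a power of two
 * so the mask can be used to wrap ring buffer offsets.
 */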
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

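/*
 * @pages is the number of data pages; one extra page is added for the
 * perf_event_mmap_page control header, hence (pages + 1) * page_size.
 */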
int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

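/*
 * Make @leader the group leader of every evsel on @list and record the
 * member count on the leader.
 */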
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}