13ce311afSJiri Olsa // SPDX-License-Identifier: GPL-2.0
23ce311afSJiri Olsa #include <perf/evlist.h>
33ce311afSJiri Olsa #include <perf/evsel.h>
43ce311afSJiri Olsa #include <linux/bitops.h>
53ce311afSJiri Olsa #include <linux/list.h>
63ce311afSJiri Olsa #include <linux/hash.h>
73ce311afSJiri Olsa #include <sys/ioctl.h>
83ce311afSJiri Olsa #include <internal/evlist.h>
93ce311afSJiri Olsa #include <internal/evsel.h>
103ce311afSJiri Olsa #include <internal/xyarray.h>
113ce311afSJiri Olsa #include <internal/mmap.h>
123ce311afSJiri Olsa #include <internal/cpumap.h>
133ce311afSJiri Olsa #include <internal/threadmap.h>
143ce311afSJiri Olsa #include <internal/lib.h>
153ce311afSJiri Olsa #include <linux/zalloc.h>
163ce311afSJiri Olsa #include <stdlib.h>
173ce311afSJiri Olsa #include <errno.h>
183ce311afSJiri Olsa #include <unistd.h>
193ce311afSJiri Olsa #include <fcntl.h>
203ce311afSJiri Olsa #include <signal.h>
213ce311afSJiri Olsa #include <poll.h>
223ce311afSJiri Olsa #include <sys/mman.h>
233ce311afSJiri Olsa #include <perf/cpumap.h>
243ce311afSJiri Olsa #include <perf/threadmap.h>
253ce311afSJiri Olsa #include <api/fd/array.h>
264ce47d84SAdrian Hunter #include "internal.h"
273ce311afSJiri Olsa
perf_evlist__init(struct perf_evlist * evlist)283ce311afSJiri Olsa void perf_evlist__init(struct perf_evlist *evlist)
293ce311afSJiri Olsa {
303ce311afSJiri Olsa INIT_LIST_HEAD(&evlist->entries);
313ce311afSJiri Olsa evlist->nr_entries = 0;
323ce311afSJiri Olsa fdarray__init(&evlist->pollfd, 64);
33e2a99c9aSNamhyung Kim perf_evlist__reset_id_hash(evlist);
343ce311afSJiri Olsa }
353ce311afSJiri Olsa
/*
 * Reconcile one evsel's cpu and thread maps with the evlist-wide,
 * user-requested maps, then fold the evsel's CPUs into evlist->all_cpus.
 *
 * Reference counting: every branch puts the old map before installing a
 * new reference, so the evsel always holds exactly one reference to each
 * map it points at.
 */
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (evsel->system_wide) {
		/* System wide: set the cpu map of the evsel to all online CPUs. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new(NULL);
	} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
		/*
		 * User requested CPUs on a core PMU, ensure the requested CPUs
		 * are valid by intersecting with those of the PMU.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		   (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
		/*
		 * The PMU didn't specify a default cpu map, this isn't a core
		 * event and the user requested CPUs or the evlist user
		 * requested CPUs have the "any CPU" (aka dummy) CPU value. In
		 * which case use the user requested CPUs rather than the PMU
		 * ones.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/*
		 * No user requested cpu map but the PMU cpu map doesn't match
		 * the evsel's. Reset it back to the PMU cpu map.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	/* System-wide events get a dummy thread map; others share the evlist's. */
	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	/* Accumulate this evsel's CPUs into the evlist-wide union. */
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
803ce311afSJiri Olsa
perf_evlist__propagate_maps(struct perf_evlist * evlist)813ce311afSJiri Olsa static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
823ce311afSJiri Olsa {
833ce311afSJiri Olsa struct perf_evsel *evsel;
843ce311afSJiri Olsa
857e2450bbSNamhyung Kim evlist->needs_map_propagation = true;
8633cd6928SIan Rogers
873ce311afSJiri Olsa perf_evlist__for_each_evsel(evlist, evsel)
883ce311afSJiri Olsa __perf_evlist__propagate_maps(evlist, evsel);
893ce311afSJiri Olsa }
903ce311afSJiri Olsa
perf_evlist__add(struct perf_evlist * evlist,struct perf_evsel * evsel)913ce311afSJiri Olsa void perf_evlist__add(struct perf_evlist *evlist,
923ce311afSJiri Olsa struct perf_evsel *evsel)
933ce311afSJiri Olsa {
9438fe0e01SJiri Olsa evsel->idx = evlist->nr_entries;
953ce311afSJiri Olsa list_add_tail(&evsel->node, &evlist->entries);
963ce311afSJiri Olsa evlist->nr_entries += 1;
977e2450bbSNamhyung Kim
987e2450bbSNamhyung Kim if (evlist->needs_map_propagation)
993ce311afSJiri Olsa __perf_evlist__propagate_maps(evlist, evsel);
1003ce311afSJiri Olsa }
1013ce311afSJiri Olsa
perf_evlist__remove(struct perf_evlist * evlist,struct perf_evsel * evsel)1023ce311afSJiri Olsa void perf_evlist__remove(struct perf_evlist *evlist,
1033ce311afSJiri Olsa struct perf_evsel *evsel)
1043ce311afSJiri Olsa {
1053ce311afSJiri Olsa list_del_init(&evsel->node);
1063ce311afSJiri Olsa evlist->nr_entries -= 1;
1073ce311afSJiri Olsa }
1083ce311afSJiri Olsa
perf_evlist__new(void)1093ce311afSJiri Olsa struct perf_evlist *perf_evlist__new(void)
1103ce311afSJiri Olsa {
1113ce311afSJiri Olsa struct perf_evlist *evlist = zalloc(sizeof(*evlist));
1123ce311afSJiri Olsa
1133ce311afSJiri Olsa if (evlist != NULL)
1143ce311afSJiri Olsa perf_evlist__init(evlist);
1153ce311afSJiri Olsa
1163ce311afSJiri Olsa return evlist;
1173ce311afSJiri Olsa }
1183ce311afSJiri Olsa
1193ce311afSJiri Olsa struct perf_evsel *
perf_evlist__next(struct perf_evlist * evlist,struct perf_evsel * prev)1203ce311afSJiri Olsa perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
1213ce311afSJiri Olsa {
1223ce311afSJiri Olsa struct perf_evsel *next;
1233ce311afSJiri Olsa
1243ce311afSJiri Olsa if (!prev) {
1253ce311afSJiri Olsa next = list_first_entry(&evlist->entries,
1263ce311afSJiri Olsa struct perf_evsel,
1273ce311afSJiri Olsa node);
1283ce311afSJiri Olsa } else {
1293ce311afSJiri Olsa next = list_next_entry(prev, node);
1303ce311afSJiri Olsa }
1313ce311afSJiri Olsa
1323ce311afSJiri Olsa /* Empty list is noticed here so don't need checking on entry. */
1333ce311afSJiri Olsa if (&next->node == &evlist->entries)
1343ce311afSJiri Olsa return NULL;
1353ce311afSJiri Olsa
1363ce311afSJiri Olsa return next;
1373ce311afSJiri Olsa }
1383ce311afSJiri Olsa
perf_evlist__purge(struct perf_evlist * evlist)1393ce311afSJiri Olsa static void perf_evlist__purge(struct perf_evlist *evlist)
1403ce311afSJiri Olsa {
1413ce311afSJiri Olsa struct perf_evsel *pos, *n;
1423ce311afSJiri Olsa
1433ce311afSJiri Olsa perf_evlist__for_each_entry_safe(evlist, n, pos) {
1443ce311afSJiri Olsa list_del_init(&pos->node);
1453ce311afSJiri Olsa perf_evsel__delete(pos);
1463ce311afSJiri Olsa }
1473ce311afSJiri Olsa
1483ce311afSJiri Olsa evlist->nr_entries = 0;
1493ce311afSJiri Olsa }
1503ce311afSJiri Olsa
perf_evlist__exit(struct perf_evlist * evlist)1513ce311afSJiri Olsa void perf_evlist__exit(struct perf_evlist *evlist)
1523ce311afSJiri Olsa {
1530df6ade7SIan Rogers perf_cpu_map__put(evlist->user_requested_cpus);
1544599d292SIan Rogers perf_cpu_map__put(evlist->all_cpus);
1553ce311afSJiri Olsa perf_thread_map__put(evlist->threads);
1560df6ade7SIan Rogers evlist->user_requested_cpus = NULL;
15744d041b7SHe Zhe evlist->all_cpus = NULL;
1583ce311afSJiri Olsa evlist->threads = NULL;
1593ce311afSJiri Olsa fdarray__exit(&evlist->pollfd);
1603ce311afSJiri Olsa }
1613ce311afSJiri Olsa
/*
 * Full teardown of an evlist: unmap rings, close fds, delete all evsels,
 * release internal state and free the evlist itself.  NULL is a no-op.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (!evlist)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
1733ce311afSJiri Olsa
/*
 * Install new cpu and thread maps on the evlist and propagate them to every
 * evsel.  Passing in the map the evlist already holds leaves that map's
 * reference count untouched.
 */
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
1973ce311afSJiri Olsa
perf_evlist__open(struct perf_evlist * evlist)1983ce311afSJiri Olsa int perf_evlist__open(struct perf_evlist *evlist)
1993ce311afSJiri Olsa {
2003ce311afSJiri Olsa struct perf_evsel *evsel;
2013ce311afSJiri Olsa int err;
2023ce311afSJiri Olsa
2033ce311afSJiri Olsa perf_evlist__for_each_entry(evlist, evsel) {
2043ce311afSJiri Olsa err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
2053ce311afSJiri Olsa if (err < 0)
2063ce311afSJiri Olsa goto out_err;
2073ce311afSJiri Olsa }
2083ce311afSJiri Olsa
2093ce311afSJiri Olsa return 0;
2103ce311afSJiri Olsa
2113ce311afSJiri Olsa out_err:
2123ce311afSJiri Olsa perf_evlist__close(evlist);
2133ce311afSJiri Olsa return err;
2143ce311afSJiri Olsa }
2153ce311afSJiri Olsa
perf_evlist__close(struct perf_evlist * evlist)2163ce311afSJiri Olsa void perf_evlist__close(struct perf_evlist *evlist)
2173ce311afSJiri Olsa {
2183ce311afSJiri Olsa struct perf_evsel *evsel;
2193ce311afSJiri Olsa
2203ce311afSJiri Olsa perf_evlist__for_each_entry_reverse(evlist, evsel)
2213ce311afSJiri Olsa perf_evsel__close(evsel);
2223ce311afSJiri Olsa }
2233ce311afSJiri Olsa
perf_evlist__enable(struct perf_evlist * evlist)2243ce311afSJiri Olsa void perf_evlist__enable(struct perf_evlist *evlist)
2253ce311afSJiri Olsa {
2263ce311afSJiri Olsa struct perf_evsel *evsel;
2273ce311afSJiri Olsa
2283ce311afSJiri Olsa perf_evlist__for_each_entry(evlist, evsel)
2293ce311afSJiri Olsa perf_evsel__enable(evsel);
2303ce311afSJiri Olsa }
2313ce311afSJiri Olsa
perf_evlist__disable(struct perf_evlist * evlist)2323ce311afSJiri Olsa void perf_evlist__disable(struct perf_evlist *evlist)
2333ce311afSJiri Olsa {
2343ce311afSJiri Olsa struct perf_evsel *evsel;
2353ce311afSJiri Olsa
2363ce311afSJiri Olsa perf_evlist__for_each_entry(evlist, evsel)
2373ce311afSJiri Olsa perf_evsel__disable(evsel);
2383ce311afSJiri Olsa }
2393ce311afSJiri Olsa
perf_evlist__read_format(struct perf_evlist * evlist)2403ce311afSJiri Olsa u64 perf_evlist__read_format(struct perf_evlist *evlist)
2413ce311afSJiri Olsa {
2423ce311afSJiri Olsa struct perf_evsel *first = perf_evlist__first(evlist);
2433ce311afSJiri Olsa
2443ce311afSJiri Olsa return first->attr.read_format;
2453ce311afSJiri Olsa }
2463ce311afSJiri Olsa
/* Sample-id slot of evsel e at (cpu index x, thread index y); may be NULL. */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
2483ce311afSJiri Olsa
/* Record @id in the evsel's sample-id slot and link it into the hash table. */
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu_map_idx, int thread, u64 id)
{
	struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
	int bucket;

	sid->id = id;
	sid->evsel = evsel;
	bucket = hash_64(id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[bucket]);
}
2613ce311afSJiri Olsa
perf_evlist__reset_id_hash(struct perf_evlist * evlist)262e2a99c9aSNamhyung Kim void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
263e2a99c9aSNamhyung Kim {
264e2a99c9aSNamhyung Kim int i;
265e2a99c9aSNamhyung Kim
266e2a99c9aSNamhyung Kim for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
267e2a99c9aSNamhyung Kim INIT_HLIST_HEAD(&evlist->heads[i]);
268e2a99c9aSNamhyung Kim }
269e2a99c9aSNamhyung Kim
/*
 * Store @id both in the evlist-wide sample-id hash and in the evsel's own
 * id array.  Silently does nothing when no sample-id slot was allocated.
 */
void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu_map_idx, int thread, u64 id)
{
	if (SID(evsel, cpu_map_idx, thread) == NULL)
		return;

	perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
	evsel->id[evsel->ids] = id;
	evsel->ids++;
}
2803ce311afSJiri Olsa
/*
 * Record the kernel event ID for @fd at (cpu_map_idx, thread).  Tries the
 * PERF_EVENT_IOC_ID ioctl first and falls back to parsing a read() of the
 * counter on kernels that predate the ioctl.  Returns 0 on success, -1 on
 * failure (including "no sample-id slot allocated").
 */
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu_map_idx, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	if (!SID(evsel, cpu_map_idx, thread))
		return -1;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	/* Only ENOTTY ("ioctl unknown to this kernel") warrants the fallback. */
	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* Skip the optional enabled/running time fields preceding the ID. */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
	return 0;
}
3243ce311afSJiri Olsa
perf_evlist__alloc_pollfd(struct perf_evlist * evlist)3253ce311afSJiri Olsa int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
3263ce311afSJiri Olsa {
3277be1feddSAdrian Hunter int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
3283ce311afSJiri Olsa int nr_threads = perf_thread_map__nr(evlist->threads);
3293ce311afSJiri Olsa int nfds = 0;
3303ce311afSJiri Olsa struct perf_evsel *evsel;
3313ce311afSJiri Olsa
3323ce311afSJiri Olsa perf_evlist__for_each_entry(evlist, evsel) {
3333ce311afSJiri Olsa if (evsel->system_wide)
3343ce311afSJiri Olsa nfds += nr_cpus;
3353ce311afSJiri Olsa else
3363ce311afSJiri Olsa nfds += nr_cpus * nr_threads;
3373ce311afSJiri Olsa }
3383ce311afSJiri Olsa
3393ce311afSJiri Olsa if (fdarray__available_entries(&evlist->pollfd) < nfds &&
3403ce311afSJiri Olsa fdarray__grow(&evlist->pollfd, nfds) < 0)
3413ce311afSJiri Olsa return -ENOMEM;
3423ce311afSJiri Olsa
3433ce311afSJiri Olsa return 0;
3443ce311afSJiri Olsa }
3453ce311afSJiri Olsa
perf_evlist__add_pollfd(struct perf_evlist * evlist,int fd,void * ptr,short revent,enum fdarray_flags flags)3463ce311afSJiri Olsa int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
347ab4c1f9fSAlexey Budankov void *ptr, short revent, enum fdarray_flags flags)
3483ce311afSJiri Olsa {
349ab4c1f9fSAlexey Budankov int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);
3503ce311afSJiri Olsa
3513ce311afSJiri Olsa if (pos >= 0) {
3523ce311afSJiri Olsa evlist->pollfd.priv[pos].ptr = ptr;
3533ce311afSJiri Olsa fcntl(fd, F_SETFL, O_NONBLOCK);
3543ce311afSJiri Olsa }
3553ce311afSJiri Olsa
3563ce311afSJiri Olsa return pos;
3573ce311afSJiri Olsa }
3583ce311afSJiri Olsa
/* fdarray filter callback: drop the mmap reference held by a filtered fd. */
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *mmap = fda->priv[fd].ptr;

	if (mmap != NULL)
		perf_mmap__put(mmap);
}
3673ce311afSJiri Olsa
/*
 * Remove pollfd entries whose revents match @revents_and_mask, releasing
 * the mmap reference of each removed entry.  Returns the number of
 * remaining entries (fdarray__filter()'s return value).
 */
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}
3733ce311afSJiri Olsa
/* Poll every registered fd; thin wrapper around fdarray__poll(). */
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
3783ce311afSJiri Olsa
perf_evlist__alloc_mmap(struct perf_evlist * evlist,bool overwrite)3793ce311afSJiri Olsa static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
3803ce311afSJiri Olsa {
3813ce311afSJiri Olsa int i;
3823ce311afSJiri Olsa struct perf_mmap *map;
3833ce311afSJiri Olsa
3843ce311afSJiri Olsa map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
3853ce311afSJiri Olsa if (!map)
3863ce311afSJiri Olsa return NULL;
3873ce311afSJiri Olsa
3883ce311afSJiri Olsa for (i = 0; i < evlist->nr_mmaps; i++) {
3893ce311afSJiri Olsa struct perf_mmap *prev = i ? &map[i - 1] : NULL;
3903ce311afSJiri Olsa
3913ce311afSJiri Olsa /*
3923ce311afSJiri Olsa * When the perf_mmap() call is made we grab one refcount, plus
3933ce311afSJiri Olsa * one extra to let perf_mmap__consume() get the last
3943ce311afSJiri Olsa * events after all real references (perf_mmap__get()) are
3953ce311afSJiri Olsa * dropped.
3963ce311afSJiri Olsa *
3973ce311afSJiri Olsa * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
3983ce311afSJiri Olsa * thus does perf_mmap__get() on it.
3993ce311afSJiri Olsa */
4003ce311afSJiri Olsa perf_mmap__init(&map[i], prev, overwrite, NULL);
4013ce311afSJiri Olsa }
4023ce311afSJiri Olsa
4033ce311afSJiri Olsa return map;
4043ce311afSJiri Olsa }
4053ce311afSJiri Olsa
perf_evsel__set_sid_idx(struct perf_evsel * evsel,int idx,int cpu,int thread)406fc705fecSAdrian Hunter static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
4073ce311afSJiri Olsa {
4083ce311afSJiri Olsa struct perf_sample_id *sid = SID(evsel, cpu, thread);
4093ce311afSJiri Olsa
4103ce311afSJiri Olsa sid->idx = idx;
411fc705fecSAdrian Hunter sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
412fc705fecSAdrian Hunter sid->tid = perf_thread_map__pid(evsel->threads, thread);
4133ce311afSJiri Olsa }
4143ce311afSJiri Olsa
4153ce311afSJiri Olsa static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist * evlist,bool overwrite,int idx)4163ce311afSJiri Olsa perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
4173ce311afSJiri Olsa {
4183ce311afSJiri Olsa struct perf_mmap *maps;
4193ce311afSJiri Olsa
4203ce311afSJiri Olsa maps = overwrite ? evlist->mmap_ovw : evlist->mmap;
4213ce311afSJiri Olsa
4223ce311afSJiri Olsa if (!maps) {
4233ce311afSJiri Olsa maps = perf_evlist__alloc_mmap(evlist, overwrite);
4243ce311afSJiri Olsa if (!maps)
4253ce311afSJiri Olsa return NULL;
4263ce311afSJiri Olsa
4273ce311afSJiri Olsa if (overwrite)
4283ce311afSJiri Olsa evlist->mmap_ovw = maps;
4293ce311afSJiri Olsa else
4303ce311afSJiri Olsa evlist->mmap = maps;
4313ce311afSJiri Olsa }
4323ce311afSJiri Olsa
4333ce311afSJiri Olsa return &maps[idx];
4343ce311afSJiri Olsa }
4353ce311afSJiri Olsa
/* Event file descriptor of evsel e at (cpu index x, thread index y). */
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
4373ce311afSJiri Olsa
/* Default ops->mmap callback: forward straight to perf_mmap__mmap(). */
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}
4443ce311afSJiri Olsa
perf_evlist__set_mmap_first(struct perf_evlist * evlist,struct perf_mmap * map,bool overwrite)4453ce311afSJiri Olsa static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
4463ce311afSJiri Olsa bool overwrite)
4473ce311afSJiri Olsa {
4483ce311afSJiri Olsa if (overwrite)
4493ce311afSJiri Olsa evlist->mmap_ovw_first = map;
4503ce311afSJiri Olsa else
4513ce311afSJiri Olsa evlist->mmap_first = map;
4523ce311afSJiri Olsa }
4533ce311afSJiri Olsa
/*
 * For one (cpu_idx, thread) slot, mmap or attach every evsel's event fd into
 * the ring buffer identified by @idx.  The first fd seen for an output group
 * (normal or overwrite) creates the mmap; later fds are redirected into it
 * with PERF_EVENT_IOC_SET_OUTPUT and take a reference on the map.
 * *nr_mmaps counts the rings actually created.  Returns 0 on success,
 * negative on failure.
 */
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		/* System-wide evsels are handled on thread 0 only. */
		if (evsel->system_wide && thread)
			continue;

		/* Skip evsels that don't count on this CPU. */
		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		/* Overwrite (backward) rings are mapped read-only. */
		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can chose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		/* System-wide fds must survive pollfd filtering on POLLHUP. */
		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}
5473ce311afSJiri Olsa
/*
 * Per-thread mmap layout: one ring buffer per thread, then one per real CPU
 * for any system-wide evsels mixed into the same evlist.  On any failure
 * everything mapped so far is torn down.
 */
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	/* Starts at 1: entry 0 is the per-thread dummy CPU (-1) handled above. */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* Sanity check against perf_evlist__nr_mmaps()'s pre-computation. */
	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}
5897864d8f7SAdrian Hunter
/*
 * Per-CPU mmap layout: one ring buffer per CPU, shared by all threads
 * counting on that CPU.  On any failure everything mapped so far is torn
 * down.
 */
static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		/* All threads on this CPU share the same output fd/ring (idx == cpu). */
		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	/* Sanity check against perf_evlist__nr_mmaps()'s pre-computation. */
	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}
6213ce311afSJiri Olsa
perf_evlist__nr_mmaps(struct perf_evlist * evlist)6223ce311afSJiri Olsa static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
6233ce311afSJiri Olsa {
6243ce311afSJiri Olsa int nr_mmaps;
6253ce311afSJiri Olsa
626ae4f8ae1SAdrian Hunter /* One for each CPU */
6277be1feddSAdrian Hunter nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
628ae4f8ae1SAdrian Hunter if (perf_cpu_map__empty(evlist->all_cpus)) {
629ae4f8ae1SAdrian Hunter /* Plus one for each thread */
630ae4f8ae1SAdrian Hunter nr_mmaps += perf_thread_map__nr(evlist->threads);
631ae4f8ae1SAdrian Hunter /* Minus the per-thread CPU (-1) */
632ae4f8ae1SAdrian Hunter nr_mmaps -= 1;
633ae4f8ae1SAdrian Hunter }
6343ce311afSJiri Olsa
6353ce311afSJiri Olsa return nr_mmaps;
6363ce311afSJiri Olsa }
6373ce311afSJiri Olsa
/*
 * Allocate supporting structures (sample ids, pollfd array) and mmap all
 * ring buffers, delegating allocation and the actual mmap to @ops.
 * Returns 0 on success or a negative errno.
 */
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	/*
	 * Mask over the ring data area (mmap_len minus the one control page).
	 * NOTE(review): assumes the data area size is a power of two — confirm
	 * at perf_evlist__mmap() callers.
	 */
	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	/* Sample-id storage is needed before fds can be id-tracked. */
	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	/* An empty CPU map means per-thread mode. */
	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}
6673ce311afSJiri Olsa
perf_evlist__mmap(struct perf_evlist * evlist,int pages)6683ce311afSJiri Olsa int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
6693ce311afSJiri Olsa {
6703ce311afSJiri Olsa struct perf_mmap_param mp;
6713ce311afSJiri Olsa struct perf_evlist_mmap_ops ops = {
6723ce311afSJiri Olsa .get = perf_evlist__mmap_cb_get,
6733ce311afSJiri Olsa .mmap = perf_evlist__mmap_cb_mmap,
6743ce311afSJiri Olsa };
6753ce311afSJiri Olsa
6763ce311afSJiri Olsa evlist->mmap_len = (pages + 1) * page_size;
6773ce311afSJiri Olsa
6783ce311afSJiri Olsa return perf_evlist__mmap_ops(evlist, &ops, &mp);
6793ce311afSJiri Olsa }
6803ce311afSJiri Olsa
perf_evlist__munmap(struct perf_evlist * evlist)6813ce311afSJiri Olsa void perf_evlist__munmap(struct perf_evlist *evlist)
6823ce311afSJiri Olsa {
6833ce311afSJiri Olsa int i;
6843ce311afSJiri Olsa
6853ce311afSJiri Olsa if (evlist->mmap) {
6863ce311afSJiri Olsa for (i = 0; i < evlist->nr_mmaps; i++)
6873ce311afSJiri Olsa perf_mmap__munmap(&evlist->mmap[i]);
6883ce311afSJiri Olsa }
6893ce311afSJiri Olsa
6903ce311afSJiri Olsa if (evlist->mmap_ovw) {
6913ce311afSJiri Olsa for (i = 0; i < evlist->nr_mmaps; i++)
6923ce311afSJiri Olsa perf_mmap__munmap(&evlist->mmap_ovw[i]);
6933ce311afSJiri Olsa }
6943ce311afSJiri Olsa
6953ce311afSJiri Olsa zfree(&evlist->mmap);
6963ce311afSJiri Olsa zfree(&evlist->mmap_ovw);
6973ce311afSJiri Olsa }
6983ce311afSJiri Olsa
6993ce311afSJiri Olsa struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist * evlist,struct perf_mmap * map,bool overwrite)7003ce311afSJiri Olsa perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
7013ce311afSJiri Olsa bool overwrite)
7023ce311afSJiri Olsa {
7033ce311afSJiri Olsa if (map)
7043ce311afSJiri Olsa return map->next;
7053ce311afSJiri Olsa
7063ce311afSJiri Olsa return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
7073ce311afSJiri Olsa }
7082e6263abSJiri Olsa
__perf_evlist__set_leader(struct list_head * list,struct perf_evsel * leader)709ecdcf630SIan Rogers void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
7102e6263abSJiri Olsa {
7115dd827e0SIan Rogers struct perf_evsel *evsel;
7125dd827e0SIan Rogers int n = 0;
7132e6263abSJiri Olsa
7145dd827e0SIan Rogers __perf_evlist__for_each_entry(list, evsel) {
7152e6263abSJiri Olsa evsel->leader = leader;
7165dd827e0SIan Rogers n++;
7175dd827e0SIan Rogers }
7185dd827e0SIan Rogers leader->nr_members = n;
7192e6263abSJiri Olsa }
7202e6263abSJiri Olsa
perf_evlist__set_leader(struct perf_evlist * evlist)7212e6263abSJiri Olsa void perf_evlist__set_leader(struct perf_evlist *evlist)
7222e6263abSJiri Olsa {
7232e6263abSJiri Olsa if (evlist->nr_entries) {
724ecdcf630SIan Rogers struct perf_evsel *first = list_entry(evlist->entries.next,
725ecdcf630SIan Rogers struct perf_evsel, node);
726ecdcf630SIan Rogers
727ecdcf630SIan Rogers __perf_evlist__set_leader(&evlist->entries, first);
7282e6263abSJiri Olsa }
7292e6263abSJiri Olsa }
7309d2dc632SIan Rogers
perf_evlist__nr_groups(struct perf_evlist * evlist)7319d2dc632SIan Rogers int perf_evlist__nr_groups(struct perf_evlist *evlist)
7329d2dc632SIan Rogers {
7339d2dc632SIan Rogers struct perf_evsel *evsel;
7349d2dc632SIan Rogers int nr_groups = 0;
7359d2dc632SIan Rogers
7369d2dc632SIan Rogers perf_evlist__for_each_evsel(evlist, evsel) {
7379d2dc632SIan Rogers /*
7389d2dc632SIan Rogers * evsels by default have a nr_members of 1, and they are their
7399d2dc632SIan Rogers * own leader. If the nr_members is >1 then this is an
7409d2dc632SIan Rogers * indication of a group.
7419d2dc632SIan Rogers */
7429d2dc632SIan Rogers if (evsel->leader == evsel && evsel->nr_members > 1)
7439d2dc632SIan Rogers nr_groups++;
7449d2dc632SIan Rogers }
7459d2dc632SIan Rogers return nr_groups;
7469d2dc632SIan Rogers }
747*f3305280SYang Jihong
perf_evlist__go_system_wide(struct perf_evlist * evlist,struct perf_evsel * evsel)748*f3305280SYang Jihong void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
749*f3305280SYang Jihong {
750*f3305280SYang Jihong if (!evsel->system_wide) {
751*f3305280SYang Jihong evsel->system_wide = true;
752*f3305280SYang Jihong if (evlist->needs_map_propagation)
753*f3305280SYang Jihong __perf_evlist__propagate_maps(evlist, evsel);
754*f3305280SYang Jihong }
755*f3305280SYang Jihong }
756