/* tools/perf/util/evsel.c (xref revision 22fd411a) */
#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread.h"

/*
 * Lvalue accessor into the per-event fd table: x indexes the cpu, y the
 * thread; the slot holds the perf_event_open() fd (set to -1 when closed).
 */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

/* Allocate a zeroed event selector and record its attr and list position. */
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx	   = idx;
		evsel->attr	   = *attr;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}

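/*
 * Usage sketch (illustrative, kept out of the build): allocating an
 * evsel for the hardware cycles counter.  PERF_TYPE_HARDWARE and
 * PERF_COUNT_HW_CPU_CYCLES are the standard constants from
 * linux/perf_event.h; idx 0 assumes this is the caller's first event,
 * and cycles_evsel() is a hypothetical helper name.
 */
#if 0
static struct perf_evsel *cycles_evsel(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};

	return perf_evsel__new(&attr, 0);
}
#endif
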
/* Reserve one fd slot per (cpu, thread) pair. */
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

/* Counts buffer: the perf_counts header plus one zeroed entry per cpu. */
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

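/*
 * A sketch of the layout the allocation above assumes (the definitive
 * declaration lives in evsel.h): a flexible per-cpu array trails the
 * aggregate fields, which is why one zalloc() of header + ncpus entries
 * suffices.
 *
 *	struct perf_counts {
 *		s8			  scaled;
 *		struct perf_counts_values aggr;
 *		struct perf_counts_values cpu[];
 *	};
 */
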
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

/* Close every fd in the table and mark the slots closed with -1. */
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

/* Free the evsel itself; it must already be unlinked from any event list. */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	free(evsel);
}

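/*
 * Teardown sketch (illustrative, kept out of the build): the expected
 * order is to close the kernel fds, release the fd table, unlink the
 * evsel, then free it.  evsel__destroy() is a hypothetical helper name.
 */
#if 0
static void evsel__destroy(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	list_del_init(&evsel->node);	/* satisfies the assert in delete */
	perf_evsel__delete(evsel);
}
#endif
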
/*
 * Read one (cpu, thread) counter into evsel->counts->cpu[cpu].  With
 * scale set, the kernel supplies val, time-enabled and time-running
 * (three u64s, which requires the PERF_FORMAT_TOTAL_TIME_* read_format
 * bits) and val is extrapolated for the time the counter was scheduled
 * off the PMU.
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;	/* never scheduled: nothing to scale */
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

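/*
 * Worked example for the scaling in __perf_evsel__read_on_cpu(): with
 * val = 1000, ena = 200000 and run = 100000 the counter was on the PMU
 * for half of its enabled time, so the estimate becomes
 * 1000 * 200000 / 100000 + 0.5 -> 2000.
 */
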
/*
 * Sum all (cpu, thread) counters into evsel->counts->aggr; the caller
 * must have allocated evsel->counts.  counts->scaled ends up 0 when no
 * scaling was needed, 1 when the value was extrapolated and -1 when
 * the event never ran.
 */
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	/* Reset the whole aggregate so re-reads don't accumulate stale ena/run. */
	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

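/*
 * Sketch (illustrative, kept out of the build): for the scaled read
 * paths above to receive three u64s per counter, the event has to be
 * opened with both time-accounting read_format bits.  The flags are
 * the standard ones from linux/perf_event.h; request_scaled_reads() is
 * a hypothetical helper name.
 */
#if 0
static void request_scaled_reads(struct perf_event_attr *attr)
{
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;
}
#endif
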
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;

	/* Allocate the fd table lazily on first open. */
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			/*
			 * pid comes from the thread map, cpu from the cpu
			 * map (-1 means "any"); no group leader, no flags.
			 */
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu], -1, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;
		}
	}

	return 0;

out_close:
	/*
	 * Unwind: on the failing cpu row only the threads before the
	 * failure were opened; every earlier row was opened in full.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

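/*
 * The two dummy maps below each hold a single -1 entry, so an open that
 * lacks one dimension still runs the loops in __perf_evsel__open() once
 * and passes -1 ("any") through to sys_perf_event_open().
 */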
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

/* NULL maps fall back to the single-entry dummies defined above. */
int perf_evsel__open(struct perf_evsel *evsel,
		     struct cpu_map *cpus, struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

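/*
 * The wrappers below fix the missing dimension with a dummy map:
 * per-cpu opens count every thread on each cpu (pid == -1), per-thread
 * opens follow each thread on any cpu (cpu == -1).
 */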
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
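
/*
 * End-to-end sketch (illustrative, kept out of the build): count
 * instructions for one thread and print the scaled total.  The
 * thread_map__new() call, the count_instructions() name and the exact
 * header set (stdio/inttypes for printf/PRIu64) are assumptions about
 * the surrounding tree; error handling and fd teardown are elided.
 */
#if 0
static void count_instructions(pid_t tid)
{
	struct perf_event_attr attr = {
		.type	     = PERF_TYPE_HARDWARE,
		.config	     = PERF_COUNT_HW_INSTRUCTIONS,
		.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			       PERF_FORMAT_TOTAL_TIME_RUNNING,
	};
	struct thread_map *threads = thread_map__new(-1, tid);
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	perf_evsel__open_per_thread(evsel, threads);
	/* ... let the target run for a while ... */
	__perf_evsel__read_on_cpu(evsel, 0, 0, true);
	printf("instructions: %" PRIu64 "\n", evsel->counts->cpu[0].val);
}
#endif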