xref: /openbmc/linux/tools/perf/util/record.c (revision e2c75e76)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "evlist.h"
3 #include "evsel.h"
4 #include "cpumap.h"
5 #include "parse-events.h"
6 #include <errno.h>
7 #include <api/fs/fs.h>
8 #include "util.h"
9 #include "cloexec.h"
10 
/* Callback that flips one perf_event_attr feature bit on an evsel. */
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

/*
 * Probe whether the running kernel accepts the attr tweak applied by @fn.
 *
 * Parses @str into a throwaway evlist, opens its first event on @cpu to
 * verify the base event itself works, then applies @fn and opens again.
 *
 * Returns 0 when the modified attr is accepted, -EINVAL when the kernel
 * rejects it, -EAGAIN when even the unmodified event could not be opened
 * (so the caller may retry with a different event string), and -ENOMEM
 * on allocation failure.
 */
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	/*
	 * Sticky across calls: once a per-CPU open fails with EACCES
	 * (presumably a perf_event_paranoid restriction — confirm), all
	 * later probes use per-task scope (pid 0) instead.
	 */
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			/* Fall back once to a per-task event if a
			 * system-wide open is not permitted. */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	/* Base event opens fine; apply the feature under test and re-open. */
	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		/* EINVAL here means the kernel rejected the tweaked attr;
		 * any other errno keeps the -EAGAIN default. */
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}
58 
59 static bool perf_probe_api(setup_probe_fn_t fn)
60 {
61 	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
62 	struct cpu_map *cpus;
63 	int cpu, ret, i = 0;
64 
65 	cpus = cpu_map__new(NULL);
66 	if (!cpus)
67 		return false;
68 	cpu = cpus->map[0];
69 	cpu_map__put(cpus);
70 
71 	do {
72 		ret = perf_do_probe_api(fn, cpu, try[i++]);
73 		if (!ret)
74 			return true;
75 	} while (ret == -EAGAIN && try[i]);
76 
77 	return false;
78 }
79 
/* Probe callback: request PERF_SAMPLE_IDENTIFIER in sample_type. */
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
84 
/* Probe callback: request exec-flagged PERF_RECORD_COMM events. */
static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}
89 
/* Probe callback: request context-switch records. */
static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}
94 
/* True when the kernel accepts PERF_SAMPLE_IDENTIFIER in sample_type. */
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}
99 
/* True when the kernel accepts attr.comm_exec. */
static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}
104 
/* True when the kernel accepts attr.context_switch. */
bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}
109 
110 bool perf_can_record_cpu_wide(void)
111 {
112 	struct perf_event_attr attr = {
113 		.type = PERF_TYPE_SOFTWARE,
114 		.config = PERF_COUNT_SW_CPU_CLOCK,
115 		.exclude_kernel = 1,
116 	};
117 	struct cpu_map *cpus;
118 	int cpu, fd;
119 
120 	cpus = cpu_map__new(NULL);
121 	if (!cpus)
122 		return false;
123 	cpu = cpus->map[0];
124 	cpu_map__put(cpus);
125 
126 	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
127 	if (fd < 0)
128 		return false;
129 	close(fd);
130 
131 	return true;
132 }
133 
/*
 * Configure every event in @evlist according to @opts and @callchain,
 * then decide how sample IDs are encoded so each record can be matched
 * back to its event.
 */
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/* A negative first CPU map entry disables inheritance —
	 * presumably this marks a per-task (no CPU list) session; confirm
	 * against cpu_map__new() semantics. */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		/* Only the tracking evsel asks for exec-flagged comm
		 * events, and only when the kernel supports them. */
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		/* With mixed sample_type layouts, PERF_SAMPLE_IDENTIFIER is
		 * needed (if supported) to locate the id in each record. */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
187 
/*
 * Read the kernel's sample-rate ceiling
 * (kernel.perf_event_max_sample_rate) into *rate.
 * Returns sysctl__read_int()'s result; callers treat non-zero as failure.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
192 
193 static int record_opts__config_freq(struct record_opts *opts)
194 {
195 	bool user_freq = opts->user_freq != UINT_MAX;
196 	unsigned int max_rate;
197 
198 	if (opts->user_interval != ULLONG_MAX)
199 		opts->default_interval = opts->user_interval;
200 	if (user_freq)
201 		opts->freq = opts->user_freq;
202 
203 	/*
204 	 * User specified count overrides default frequency.
205 	 */
206 	if (opts->default_interval)
207 		opts->freq = 0;
208 	else if (opts->freq) {
209 		opts->default_interval = opts->freq;
210 	} else {
211 		pr_err("frequency and count are zero, aborting\n");
212 		return -1;
213 	}
214 
215 	if (get_max_rate(&max_rate))
216 		return 0;
217 
218 	/*
219 	 * User specified frequency is over current maximum.
220 	 */
221 	if (user_freq && (max_rate < opts->freq)) {
222 		pr_err("Maximum frequency rate (%u) reached.\n"
223 		   "Please use -F freq option with lower value or consider\n"
224 		   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
225 		   max_rate);
226 		return -1;
227 	}
228 
229 	/*
230 	 * Default frequency is over current maximum.
231 	 */
232 	if (max_rate < opts->freq) {
233 		pr_warning("Lowering default frequency rate to %u.\n"
234 			   "Please consider tweaking "
235 			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
236 			   max_rate);
237 		opts->freq = max_rate;
238 	}
239 
240 	return 0;
241 }
242 
/*
 * Validate and finalize recording options.  Currently only the
 * frequency/period settings need fixing up.  Returns 0 on success,
 * -1 on invalid options.
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
247 
248 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
249 {
250 	struct perf_evlist *temp_evlist;
251 	struct perf_evsel *evsel;
252 	int err, fd, cpu;
253 	bool ret = false;
254 	pid_t pid = -1;
255 
256 	temp_evlist = perf_evlist__new();
257 	if (!temp_evlist)
258 		return false;
259 
260 	err = parse_events(temp_evlist, str, NULL);
261 	if (err)
262 		goto out_delete;
263 
264 	evsel = perf_evlist__last(temp_evlist);
265 
266 	if (!evlist || cpu_map__empty(evlist->cpus)) {
267 		struct cpu_map *cpus = cpu_map__new(NULL);
268 
269 		cpu =  cpus ? cpus->map[0] : 0;
270 		cpu_map__put(cpus);
271 	} else {
272 		cpu = evlist->cpus->map[0];
273 	}
274 
275 	while (1) {
276 		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
277 					 perf_event_open_cloexec_flag());
278 		if (fd < 0) {
279 			if (pid == -1 && errno == EACCES) {
280 				pid = 0;
281 				continue;
282 			}
283 			goto out_delete;
284 		}
285 		break;
286 	}
287 	close(fd);
288 	ret = true;
289 
290 out_delete:
291 	perf_evlist__delete(temp_evlist);
292 	return ret;
293 }
294