xref: /openbmc/linux/tools/perf/util/record.c (revision bf070bb0)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "evlist.h"
3 #include "evsel.h"
4 #include "cpumap.h"
5 #include "parse-events.h"
6 #include <errno.h>
7 #include <api/fs/fs.h>
8 #include "util.h"
9 #include "cloexec.h"
10 
11 typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
12 
/*
 * Probe whether the kernel accepts the attribute bit that @fn sets.
 *
 * The event described by @str is first opened unmodified on @cpu to make
 * sure the event itself works, then @fn flips the attribute bit under test
 * and the open is retried.
 *
 * Returns 0 when the modified attribute is accepted, -EINVAL when the
 * kernel explicitly rejects it, -ENOMEM on allocation failure, and
 * -EAGAIN for any other failure so the caller can retry with a
 * different probe event.
 */
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	/*
	 * Sticky across calls: once we fall back from system-wide
	 * (pid == -1) to per-process (pid == 0) monitoring because of
	 * EACCES, stay there for all later probes.
	 */
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	/* First open the event as-is, to separate "event doesn't work at
	 * all" (-> -EAGAIN) from "attribute bit not supported" below. */
	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	/* Set the attribute bit being probed ... */
	fn(evsel);

	/* ... and see whether the kernel still accepts the attribute. */
	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}
58 
59 static bool perf_probe_api(setup_probe_fn_t fn)
60 {
61 	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
62 	struct cpu_map *cpus;
63 	int cpu, ret, i = 0;
64 
65 	cpus = cpu_map__new(NULL);
66 	if (!cpus)
67 		return false;
68 	cpu = cpus->map[0];
69 	cpu_map__put(cpus);
70 
71 	do {
72 		ret = perf_do_probe_api(fn, cpu, try[i++]);
73 		if (!ret)
74 			return true;
75 	} while (ret == -EAGAIN && try[i]);
76 
77 	return false;
78 }
79 
/* Probe callback: request PERF_SAMPLE_IDENTIFIER in the sample type. */
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
84 
/* Probe callback: set the comm_exec attribute bit. */
static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}
89 
/* Probe callback: set the context_switch attribute bit. */
static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}
94 
/* True if the running kernel accepts PERF_SAMPLE_IDENTIFIER. */
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}
99 
/* True if the running kernel accepts the comm_exec attribute bit. */
static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}
104 
/* True if the running kernel accepts the context_switch attribute bit. */
bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}
109 
110 bool perf_can_record_cpu_wide(void)
111 {
112 	struct perf_event_attr attr = {
113 		.type = PERF_TYPE_SOFTWARE,
114 		.config = PERF_COUNT_SW_CPU_CLOCK,
115 		.exclude_kernel = 1,
116 	};
117 	struct cpu_map *cpus;
118 	int cpu, fd;
119 
120 	cpus = cpu_map__new(NULL);
121 	if (!cpus)
122 		return false;
123 	cpu = cpus->map[0];
124 	cpu_map__put(cpus);
125 
126 	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
127 	if (fd < 0)
128 		return false;
129 	close(fd);
130 
131 	return true;
132 }
133 
/*
 * Apply record-session configuration to every evsel in @evlist,
 * according to @opts and @callchain, probing the kernel for optional
 * features (comm_exec, sample identifiers) where needed.
 */
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/*
	 * A first map entry of -1 indicates no specific cpu is being
	 * monitored; force no_inherit in that case.
	 * NOTE(review): presumably this is the per-thread ("any cpu")
	 * case where inheritance is not usable — confirm against the
	 * cpu_map constructors.
	 */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		/* Only the tracking evsel gets the comm_exec bit. */
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		/*
		 * With multiple events whose sample_type differs, the id
		 * cannot be found at a fixed position; use
		 * PERF_SAMPLE_IDENTIFIER (if the kernel supports it) so
		 * all events carry the id at the same place.
		 */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
183 
/*
 * Read the kernel's sampling-rate cap from
 * /proc/sys/kernel/perf_event_max_sample_rate into *rate.
 * Returns sysctl__read_int()'s result (0 on success).
 * NOTE(review): the (int *) cast assumes the sysctl value fits in a
 * non-negative int — holds for this kernel sysctl, but verify.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
188 
/*
 * Resolve the sampling period/frequency for a record session.
 *
 * Precedence: a user-specified period (user_interval) wins over a
 * user-specified frequency (user_freq), which wins over the built-in
 * default frequency.  The chosen frequency is then checked against the
 * kernel's perf_event_max_sample_rate cap.
 *
 * Returns 0 on success, -1 when both frequency and period end up zero
 * or a user-requested frequency exceeds the kernel maximum.
 */
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	/* UINT_MAX / ULLONG_MAX are the "not set by user" sentinels. */
	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	/* Cap unreadable: accept the configured values as-is. */
	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		   "Please use -F freq option with lower value or consider\n"
		   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		   max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}
238 
/*
 * Validate and normalize record options.  Currently only resolves the
 * sampling frequency/period; returns 0 on success, -1 on error.
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
243 
244 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
245 {
246 	struct perf_evlist *temp_evlist;
247 	struct perf_evsel *evsel;
248 	int err, fd, cpu;
249 	bool ret = false;
250 	pid_t pid = -1;
251 
252 	temp_evlist = perf_evlist__new();
253 	if (!temp_evlist)
254 		return false;
255 
256 	err = parse_events(temp_evlist, str, NULL);
257 	if (err)
258 		goto out_delete;
259 
260 	evsel = perf_evlist__last(temp_evlist);
261 
262 	if (!evlist || cpu_map__empty(evlist->cpus)) {
263 		struct cpu_map *cpus = cpu_map__new(NULL);
264 
265 		cpu =  cpus ? cpus->map[0] : 0;
266 		cpu_map__put(cpus);
267 	} else {
268 		cpu = evlist->cpus->map[0];
269 	}
270 
271 	while (1) {
272 		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
273 					 perf_event_open_cloexec_flag());
274 		if (fd < 0) {
275 			if (pid == -1 && errno == EACCES) {
276 				pid = 0;
277 				continue;
278 			}
279 			goto out_delete;
280 		}
281 		break;
282 	}
283 	close(fd);
284 	ret = true;
285 
286 out_delete:
287 	perf_evlist__delete(temp_evlist);
288 	return ret;
289 }
290