// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
#include "../../util/util.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct perf_evlist	*evlist;
	int			wrapped_cnt;
	bool			*wrapped;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_set_context_id(struct auxtrace_record *itr,
				 struct perf_evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR2 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bail out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR2.CIDSIZE, bits [9:5], indicates whether Context ID
	 * tracing is supported:
	 *  0b00000 Context ID tracing is not supported.
	 *  0b00100 Maximum of 32-bit Context ID size.
	 *  All other values are reserved.
	 */
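	/*
	 * Worked example (hypothetical register value): if TRCIDR2 reads
	 * back as 0x00068488, bits [9:5] are (0x00068488 >> 5) & 0x1f = 0x4,
	 * i.e. a 32-bit Context ID is traced and the option below can be
	 * requested.
	 */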
	val = BMVAL(val, 5, 9);
	if (val != 0x4) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->attr.config |= (1 << ETM_OPT_CTXTID);
	err = 0;

out:
	return err;
}

static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct perf_evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bail out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR0.TSSIZE, bits [28:24], indicates whether global timestamping
	 * is supported:
	 *  0b00000 Global timestamping is not implemented.
	 *  0b00110 Implementation supports a maximum timestamp of 48 bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64 bits.
	 */
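	/*
	 * Worked example (hypothetical register value): if TRCIDR0 reads
	 * back as 0x28000ea1, bits [28:24] are 0b01000, so the mask below
	 * leaves a non-zero value and 64-bit timestamps are available.
	 * Only presence is checked here; the exact size is not decoded.
	 */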
	val &= GENMASK(28, 24);
	if (!val) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->attr.config |= (1 << ETM_OPT_TS);
	err = 0;

out:
	return err;
}

static int cs_etm_set_option(struct auxtrace_record *itr,
			     struct perf_evsel *evsel, u32 option)
{
	int i, err = -EINVAL;
	struct cpu_map *event_cpus = evsel->evlist->cpus;
	struct cpu_map *online_cpus = cpu_map__new(NULL);

	/* Set option on each CPU we have */
	for (i = 0; i < cpu__max_cpu(); i++) {
		if (!cpu_map__has(event_cpus, i) ||
		    !cpu_map__has(online_cpus, i))
			continue;

		if (option & ETM_OPT_CTXTID) {
			err = cs_etm_set_context_id(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ETM_OPT_TS) {
			err = cs_etm_set_timestamp(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS)) {
			/* Nothing else is currently supported */
			err = -EINVAL;
			goto out;
		}
	}

	err = 0;
out:
	cpu_map__put(online_cpus);
	return err;
}

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

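/*
 * Illustrative usage (sink name and size are hypothetical and platform
 * dependent):
 *
 *   perf record -e cs_etm/@tmc_etr0/u -S8192 --per-thread ./app
 *
 * The "8192" after -S arrives in @str above and yields an 8kiB snapshot
 * size.  When no size is given, snapshot_size stays 0 and defaults are
 * derived later in cs_etm_recording_options().
 */
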
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct perf_evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct perf_evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	if (evsel->attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.drv_cfg;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
			       sink, perf_evsel__name(evsel), errno,
			       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - for _now_ treat
	 * this as an error.
	 */
	return ret;
}

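/*
 * Example of how a sink ends up in config2 (sink name is hypothetical):
 *
 *   perf record -e cs_etm/@tmc_etr0/u --per-thread ./app
 *
 * The "@tmc_etr0" term is carried as PERF_EVSEL__CONFIG_TERM_DRV_CFG,
 * its hash is read from the PMU's "sinks/tmc_etr0" sysfs file and stored
 * in the lower 32 bits of perf_event_attr::config2 above.
 */
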
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	struct cpu_map *cpus = evlist->cpus;
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
	int err = 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (perf_can_record_switch_events())
		opts->record_switch_events = true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* Nothing to do if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default.
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
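
		/*
		 * Worked example (hypothetical numbers): with 4kiB pages and
		 * '-S' requesting a 300kiB snapshot, sz works out to 75
		 * pages, which roundup_pow_of_two() turns into 128 pages,
		 * i.e. a 512kiB auxtrace mmap area.
		 */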

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.  We also need the Context ID in order to be notified
	 * when a context switch happens.
	 */
	if (!cpu_map__empty(cpus)) {
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					ETM_OPT_CTXTID | ETM_OPT_TS);
		if (err)
			goto out;
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			goto out;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc. */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

out:
	return err;
}

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evlist *evlist = ptr->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real bits.
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);

	return config;
}

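/*
 * Worked example (bit names from coresight-pmu.h): requesting timestamps
 * and a return stack sets BIT(ETM_OPT_TS) and BIT(ETM_OPT_RETSTK) in
 * perf_event_attr::config; the mapping above turns those into
 * BIT(ETM4_CFG_BIT_TS) and BIT(ETM4_CFG_BIT_RETSTK), the layout the
 * ETMv4 TRCCONFIGR register expects.
 */
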
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
		      struct perf_evlist *evlist)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	struct cpu_map *event_cpus = evlist->cpus;
	struct cpu_map *online_cpus = cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

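/*
 * Worked example (hypothetical system): tracing two ETMv4 CPUs and one
 * ETMv3 CPU yields CS_ETM_HEADER_SIZE + 2 * CS_ETMV4_PRIV_SIZE +
 * 1 * CS_ETMV3_PRIV_SIZE bytes of private data; this is the same
 * @priv_size value that cs_etm_info_fill() validates against below.
 */
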
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* First see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}

static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct auxtrace_info_event *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct cpu_map *cpu_map;
	struct cpu_map *event_cpus = session->evlist->cpus;
	struct cpu_map *online_cpus = cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty, all online CPUs are involved */
	if (cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		for (i = 0; i < cpu_map__nr(event_cpus); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i))
				return -EINVAL;
		}

		cpu_map = event_cpus;
	}

	nr_cpu = cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
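	/*
	 * Worked example (hypothetical values): with a dynamically assigned
	 * PMU type of 8 and 4 CPUs, CS_PMU_TYPE_CPUS now holds
	 * (8ULL << 32) | 4 = 0x800000004: PMU type in the upper 32 bits,
	 * CPU count in the lower 32 bits.
	 */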
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	cpu_map__put(online_cpus);

	return 0;
}

static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
{
	bool *wrapped;
	int i, cnt = ptr->wrapped_cnt;

	/* Make @ptr->wrapped big enough to hold @idx */
	while (cnt <= idx)
		cnt++;

	/*
	 * Freed in cs_etm_recording_free().  Using realloc() to avoid
	 * cross compilation problems where the host system supports
	 * reallocarray() but the target does not.
	 */
	wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/* Initialise every new entry, not just the last one */
	for (i = ptr->wrapped_cnt; i < cnt; i++)
		wrapped[i] = false;

	ptr->wrapped_cnt = cnt;
	ptr->wrapped = wrapped;

	return 0;
}

static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
				      size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily)
	 * in the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * @head is continuously increasing - if its value is equal to or
	 * greater than the size of the ring buffer, it has wrapped around.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * The value of @head is somewhere within the size of the ring buffer.
	 * This can be that there hasn't been enough data to fill the ring
	 * buffer yet or the trace time was so long that @head has numerically
	 * wrapped around.  To find out, we need to check if we have data at
	 * the very end of the ring buffer.  We can reliably do this because
	 * mmap'ed pages are zeroed out and there is a fresh mapping with
	 * every new session.
	 */

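	/*
	 * Worked example (hypothetical numbers): with a 64kiB buffer and
	 * @head == 60000, the 512 bytes from offset 65024 upward are
	 * scanned; any non-zero word there means data was written past
	 * @head at some point, hence a wrap.
	 */
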
	/* @head is less than 512 bytes from the end of the ring buffer */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark >>= 3;
	buf_size >>= 3;

	/*
	 * If we find trace data at the end of the ring buffer, @head has
	 * been there and has numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't, only the
	 * amount of data between *head and *old is snapshotted to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}
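
	/*
	 * Worked example (hypothetical numbers): with mm->len == 65536 and
	 * *head == 70000, the first branch above gives *old == 4464, so
	 * exactly one full buffer's worth (65536 bytes) is captured.
	 */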

	return 0;
}

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
		(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
	free(ptr);
}

static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}
907