xref: /openbmc/linux/tools/perf/arch/arm/util/cs-etm.c (revision 82e6fdd6)
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <api/fs/fs.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"

#include <stdlib.h>
#include <sys/stat.h>

#define ENABLE_SINK_MAX	128
#define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct perf_evlist	*evlist;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}
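
/*
 * Example (illustrative): a snapshot request such as
 *
 *	perf record -e cs_etm// -S4096 --per-thread -- ls
 *
 * reaches the parser above with str == "4096", which becomes
 * opts->auxtrace_snapshot_size; a bare -S leaves the size at 0 so that a
 * default is chosen later in cs_etm_recording_options().
 */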

static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* No need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make
		 * the auxtrace mmap area big enough to fit the requested
		 * snapshot size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.
	 */
	if (!cpu_map__empty(cpus))
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}
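
/*
 * Sizing summary for the function above: privileged sessions default to a
 * 4MiB AUX area and unprivileged ones to 128KiB.  With the common 4KiB page
 * size that is MiB(4) / 4096 = 1024 auxtrace mmap pages in the privileged
 * case and KiB(128) / 4096 = 32 pages otherwise, while unprivileged users
 * also get the regular mmap bumped to KiB(256) / 4096 = 64 pages when no
 * '-m' value was given.
 */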

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evlist *evlist = ptr->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real bits
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);

	return config;
}
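
/*
 * The ETM_OPT_* bits above are set from the cs_etm PMU's format terms
 * (e.g. "cycacc", "timestamp", "retstack"), so a request along the lines of
 *
 *	perf record -e cs_etm/cycacc,timestamp/ ...
 *
 * sets the matching ETM_OPT_* bits in attr.config, and cs_etmv4_get_config()
 * translates them to the TRCCONFIGR bit positions an ETMv4 tracer expects.
 * Format-term names are those exported by the CoreSight PMU driver and may
 * vary between kernel versions.
 */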

static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
		      struct perf_evlist *evlist)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	struct cpu_map *event_cpus = evlist->cpus;
	struct cpu_map *online_cpus = cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* Get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};
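
/*
 * The paths above are relative to the cs_etm PMU's sysfs directory and are
 * read through perf_pmu__scan_file() as "cpu<N>/<path>".  On a typical
 * system this resolves to something like
 *
 *	/sys/bus/event_source/devices/cs_etm/cpu0/trcidr/trcidr0
 *
 * although the exact layout depends on how the CoreSight driver exposes the
 * per-CPU tracers.
 */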

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* First see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
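
/*
 * Each per-CPU block written above therefore starts with the common
 * CS_ETM_MAGIC and CS_ETM_CPU words followed by the version-specific
 * registers, and *offset advances by CS_ETM_PRIV_MAX or CS_ETMV4_PRIV_MAX
 * so the next CPU's block begins right after it.  The decode side
 * (util/cs-etm.c) expects exactly this layout when it parses the
 * PERF_RECORD_AUXTRACE_INFO event.
 */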

static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct auxtrace_info_event *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct cpu_map *cpu_map;
	struct cpu_map *event_cpus = session->evlist->cpus;
	struct cpu_map *online_cpus = cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) {
		cpu_map__put(online_cpus);
		return -EINVAL;
	}

	if (!session->evlist->nr_mmaps) {
		cpu_map__put(online_cpus);
		return -EINVAL;
	}

	/* If the cpu_map is empty all online CPUs are involved */
	if (cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		for (i = 0; i < cpu_map__nr(event_cpus); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i)) {
				cpu_map__put(online_cpus);
				return -EINVAL;
			}
		}

		cpu_map = event_cpus;
	}

	nr_cpu = cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	cpu_map__put(online_cpus);

	return 0;
}
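
/*
 * Header recap: CS_PMU_TYPE_CPUS packs the dynamically assigned PMU type in
 * its upper 32 bits and the number of traced CPUs in the lower 32, so a PMU
 * type of 8 covering 4 CPUs is stored as 0x0000000800000004.  CS_ETM_SNAPSHOT
 * records whether the session ran in snapshot mode, and the per-CPU metadata
 * blocks follow immediately afterwards.
 */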

static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data __maybe_unused,
				u64 *head, u64 *old)
{
	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	*old = *head;
	*head += mm->len;

	return 0;
}
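
/*
 * In snapshot mode the sink wraps around silently, so there is no reliable
 * way of telling how much of the AUX buffer holds valid trace; the function
 * above simply reports the whole mmap area as filled by advancing the head
 * by mm->len and leaves it to the decoder to locate sync packets within it.
 */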

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
		(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}
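
/*
 * The reference only needs to be unlikely to repeat between AUX records, so
 * two rand() results are combined into the low and high halves of a 64-bit
 * value; it is not meant to be a cryptographically strong identifier.
 */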

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	free(ptr);
}

static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}

static FILE *cs_device__open_file(const char *name)
{
	struct stat st;
	char path[PATH_MAX];
	const char *sysfs;

	sysfs = sysfs__mountpoint();
	if (!sysfs)
		return NULL;

	snprintf(path, PATH_MAX,
		 "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);

	if (stat(path, &st) < 0)
		return NULL;

	return fopen(path, "w");
}
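
/*
 * Example (illustrative): with sysfs mounted at /sys and name set to
 * "<sink>/enable_sink", the file opened for writing above is
 *
 *	/sys/bus/coresight/devices/<sink>/enable_sink
 *
 * where <sink> is a platform-specific CoreSight device name such as
 * "20070000.etr" on some DT-based systems.
 */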

static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
{
	va_list args;
	FILE *file;
	int ret = -EINVAL;

	va_start(args, fmt);
	file = cs_device__open_file(name);
	if (file) {
		ret = vfprintf(file, fmt, args);
		fclose(file);
	}
	va_end(args);
	return ret;
}

int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
{
	int ret;
	char enable_sink[ENABLE_SINK_MAX];

	snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
		 term->val.drv_cfg, "enable_sink");

	ret = cs_device__print_file(enable_sink, "%d", 1);
	if (ret < 0)
		return ret;

	return 0;
}
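
/*
 * Example (illustrative): the driver-config term is the sink selected with
 * the '@' syntax in the event specification, e.g.
 *
 *	perf record -e cs_etm/@20070000.etr/u --per-thread -- ls
 *
 * arrives here with term->val.drv_cfg == "20070000.etr", and a 1 is written
 * to that sink's enable_sink attribute before tracing starts.  The device
 * name is platform specific and shown only as an example.
 */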
663