xref: /openbmc/linux/tools/perf/arch/arm/util/cs-etm.c (revision 930c429a)
1 /*
2  * Copyright(C) 2015 Linaro Limited. All rights reserved.
3  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <api/fs/fs.h>
19 #include <linux/bitops.h>
20 #include <linux/compiler.h>
21 #include <linux/coresight-pmu.h>
22 #include <linux/kernel.h>
23 #include <linux/log2.h>
24 #include <linux/types.h>
25 
26 #include "cs-etm.h"
27 #include "../../perf.h"
28 #include "../../util/auxtrace.h"
29 #include "../../util/cpumap.h"
30 #include "../../util/evlist.h"
31 #include "../../util/evsel.h"
32 #include "../../util/pmu.h"
33 #include "../../util/thread_map.h"
34 #include "../../util/cs-etm.h"
35 
36 #include <stdlib.h>
37 #include <sys/stat.h>
38 
39 #define ENABLE_SINK_MAX	128
40 #define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
41 
42 struct cs_etm_recording {
43 	struct auxtrace_record	itr;
44 	struct perf_pmu		*cs_etm_pmu;
45 	struct perf_evlist	*evlist;
46 	bool			snapshot_mode;
47 	size_t			snapshot_size;
48 };
49 
50 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
51 
52 static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
53 					 struct record_opts *opts,
54 					 const char *str)
55 {
56 	struct cs_etm_recording *ptr =
57 				container_of(itr, struct cs_etm_recording, itr);
58 	unsigned long long snapshot_size = 0;
59 	char *endptr;
60 
61 	if (str) {
62 		snapshot_size = strtoull(str, &endptr, 0);
63 		if (*endptr || snapshot_size > SIZE_MAX)
64 			return -1;
65 	}
66 
67 	opts->auxtrace_snapshot_mode = true;
68 	opts->auxtrace_snapshot_size = snapshot_size;
69 	ptr->snapshot_size = snapshot_size;
70 
71 	return 0;
72 }
73 
74 static int cs_etm_recording_options(struct auxtrace_record *itr,
75 				    struct perf_evlist *evlist,
76 				    struct record_opts *opts)
77 {
78 	struct cs_etm_recording *ptr =
79 				container_of(itr, struct cs_etm_recording, itr);
80 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
81 	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
82 	const struct cpu_map *cpus = evlist->cpus;
83 	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
84 
85 	ptr->evlist = evlist;
86 	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
87 
88 	evlist__for_each_entry(evlist, evsel) {
89 		if (evsel->attr.type == cs_etm_pmu->type) {
90 			if (cs_etm_evsel) {
91 				pr_err("There may be only one %s event\n",
92 				       CORESIGHT_ETM_PMU_NAME);
93 				return -EINVAL;
94 			}
95 			evsel->attr.freq = 0;
96 			evsel->attr.sample_period = 1;
97 			cs_etm_evsel = evsel;
98 			opts->full_auxtrace = true;
99 		}
100 	}
101 
102 	/* no need to continue if at least one event of interest was found */
103 	if (!cs_etm_evsel)
104 		return 0;
105 
106 	if (opts->use_clockid) {
107 		pr_err("Cannot use clockid (-k option) with %s\n",
108 		       CORESIGHT_ETM_PMU_NAME);
109 		return -EINVAL;
110 	}
111 
112 	/* we are in snapshot mode */
113 	if (opts->auxtrace_snapshot_mode) {
114 		/*
115 		 * No size were given to '-S' or '-m,', so go with
116 		 * the default
117 		 */
118 		if (!opts->auxtrace_snapshot_size &&
119 		    !opts->auxtrace_mmap_pages) {
120 			if (privileged) {
121 				opts->auxtrace_mmap_pages = MiB(4) / page_size;
122 			} else {
123 				opts->auxtrace_mmap_pages =
124 							KiB(128) / page_size;
125 				if (opts->mmap_pages == UINT_MAX)
126 					opts->mmap_pages = KiB(256) / page_size;
127 			}
128 		} else if (!opts->auxtrace_mmap_pages && !privileged &&
129 						opts->mmap_pages == UINT_MAX) {
130 			opts->mmap_pages = KiB(256) / page_size;
131 		}
132 
133 		/*
134 		 * '-m,xyz' was specified but no snapshot size, so make the
135 		 * snapshot size as big as the auxtrace mmap area.
136 		 */
137 		if (!opts->auxtrace_snapshot_size) {
138 			opts->auxtrace_snapshot_size =
139 				opts->auxtrace_mmap_pages * (size_t)page_size;
140 		}
141 
142 		/*
143 		 * -Sxyz was specified but no auxtrace mmap area, so make the
144 		 * auxtrace mmap area big enough to fit the requested snapshot
145 		 * size.
146 		 */
147 		if (!opts->auxtrace_mmap_pages) {
148 			size_t sz = opts->auxtrace_snapshot_size;
149 
150 			sz = round_up(sz, page_size) / page_size;
151 			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
152 		}
153 
154 		/* Snapshost size can't be bigger than the auxtrace area */
155 		if (opts->auxtrace_snapshot_size >
156 				opts->auxtrace_mmap_pages * (size_t)page_size) {
157 			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
158 			       opts->auxtrace_snapshot_size,
159 			       opts->auxtrace_mmap_pages * (size_t)page_size);
160 			return -EINVAL;
161 		}
162 
163 		/* Something went wrong somewhere - this shouldn't happen */
164 		if (!opts->auxtrace_snapshot_size ||
165 		    !opts->auxtrace_mmap_pages) {
166 			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
167 			return -EINVAL;
168 		}
169 	}
170 
171 	/* We are in full trace mode but '-m,xyz' wasn't specified */
172 	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
173 		if (privileged) {
174 			opts->auxtrace_mmap_pages = MiB(4) / page_size;
175 		} else {
176 			opts->auxtrace_mmap_pages = KiB(128) / page_size;
177 			if (opts->mmap_pages == UINT_MAX)
178 				opts->mmap_pages = KiB(256) / page_size;
179 		}
180 
181 	}
182 
183 	/* Validate auxtrace_mmap_pages provided by user */
184 	if (opts->auxtrace_mmap_pages) {
185 		unsigned int max_page = (KiB(128) / page_size);
186 		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
187 
188 		if (!privileged &&
189 		    opts->auxtrace_mmap_pages > max_page) {
190 			opts->auxtrace_mmap_pages = max_page;
191 			pr_err("auxtrace too big, truncating to %d\n",
192 			       max_page);
193 		}
194 
195 		if (!is_power_of_2(sz)) {
196 			pr_err("Invalid mmap size for %s: must be a power of 2\n",
197 			       CORESIGHT_ETM_PMU_NAME);
198 			return -EINVAL;
199 		}
200 	}
201 
202 	if (opts->auxtrace_snapshot_mode)
203 		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
204 			  opts->auxtrace_snapshot_size);
205 
206 	/*
207 	 * To obtain the auxtrace buffer file descriptor, the auxtrace
208 	 * event must come first.
209 	 */
210 	perf_evlist__to_front(evlist, cs_etm_evsel);
211 
212 	/*
213 	 * In the case of per-cpu mmaps, we need the CPU on the
214 	 * AUX event.
215 	 */
216 	if (!cpu_map__empty(cpus))
217 		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
218 
219 	/* Add dummy event to keep tracking */
220 	if (opts->full_auxtrace) {
221 		struct perf_evsel *tracking_evsel;
222 		int err;
223 
224 		err = parse_events(evlist, "dummy:u", NULL);
225 		if (err)
226 			return err;
227 
228 		tracking_evsel = perf_evlist__last(evlist);
229 		perf_evlist__set_tracking_event(evlist, tracking_evsel);
230 
231 		tracking_evsel->attr.freq = 0;
232 		tracking_evsel->attr.sample_period = 1;
233 
234 		/* In per-cpu case, always need the time of mmap events etc */
235 		if (!cpu_map__empty(cpus))
236 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
237 	}
238 
239 	return 0;
240 }
241 
242 static u64 cs_etm_get_config(struct auxtrace_record *itr)
243 {
244 	u64 config = 0;
245 	struct cs_etm_recording *ptr =
246 			container_of(itr, struct cs_etm_recording, itr);
247 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
248 	struct perf_evlist *evlist = ptr->evlist;
249 	struct perf_evsel *evsel;
250 
251 	evlist__for_each_entry(evlist, evsel) {
252 		if (evsel->attr.type == cs_etm_pmu->type) {
253 			/*
254 			 * Variable perf_event_attr::config is assigned to
255 			 * ETMv3/PTM.  The bit fields have been made to match
256 			 * the ETMv3.5 ETRMCR register specification.  See the
257 			 * PMU_FORMAT_ATTR() declarations in
258 			 * drivers/hwtracing/coresight/coresight-perf.c for
259 			 * details.
260 			 */
261 			config = evsel->attr.config;
262 			break;
263 		}
264 	}
265 
266 	return config;
267 }
268 
269 #ifndef BIT
270 #define BIT(N) (1UL << (N))
271 #endif
272 
273 static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
274 {
275 	u64 config = 0;
276 	u64 config_opts = 0;
277 
278 	/*
279 	 * The perf event variable config bits represent both
280 	 * the command line options and register programming
281 	 * bits in ETMv3/PTM. For ETMv4 we must remap options
282 	 * to real bits
283 	 */
284 	config_opts = cs_etm_get_config(itr);
285 	if (config_opts & BIT(ETM_OPT_CYCACC))
286 		config |= BIT(ETM4_CFG_BIT_CYCACC);
287 	if (config_opts & BIT(ETM_OPT_TS))
288 		config |= BIT(ETM4_CFG_BIT_TS);
289 	if (config_opts & BIT(ETM_OPT_RETSTK))
290 		config |= BIT(ETM4_CFG_BIT_RETSTK);
291 
292 	return config;
293 }
294 
295 static size_t
296 cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
297 		      struct perf_evlist *evlist __maybe_unused)
298 {
299 	int i;
300 	int etmv3 = 0, etmv4 = 0;
301 	const struct cpu_map *cpus = evlist->cpus;
302 
303 	/* cpu map is not empty, we have specific CPUs to work with */
304 	if (!cpu_map__empty(cpus)) {
305 		for (i = 0; i < cpu_map__nr(cpus); i++) {
306 			if (cs_etm_is_etmv4(itr, cpus->map[i]))
307 				etmv4++;
308 			else
309 				etmv3++;
310 		}
311 	} else {
312 		/* get configuration for all CPUs in the system */
313 		for (i = 0; i < cpu__max_cpu(); i++) {
314 			if (cs_etm_is_etmv4(itr, i))
315 				etmv4++;
316 			else
317 				etmv3++;
318 		}
319 	}
320 
321 	return (CS_ETM_HEADER_SIZE +
322 	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
323 	       (etmv3 * CS_ETMV3_PRIV_SIZE));
324 }
325 
326 static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
327 	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
328 	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
329 };
330 
331 static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
332 	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
333 	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
334 	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
335 	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
336 	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
337 };
338 
339 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
340 {
341 	bool ret = false;
342 	char path[PATH_MAX];
343 	int scan;
344 	unsigned int val;
345 	struct cs_etm_recording *ptr =
346 			container_of(itr, struct cs_etm_recording, itr);
347 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
348 
349 	/* Take any of the RO files for ETMv4 and see if it present */
350 	snprintf(path, PATH_MAX, "cpu%d/%s",
351 		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
352 	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
353 
354 	/* The file was read successfully, we have a winner */
355 	if (scan == 1)
356 		ret = true;
357 
358 	return ret;
359 }
360 
361 static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
362 {
363 	char pmu_path[PATH_MAX];
364 	int scan;
365 	unsigned int val = 0;
366 
367 	/* Get RO metadata from sysfs */
368 	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
369 
370 	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
371 	if (scan != 1)
372 		pr_err("%s: error reading: %s\n", __func__, pmu_path);
373 
374 	return val;
375 }
376 
377 static void cs_etm_get_metadata(int cpu, u32 *offset,
378 				struct auxtrace_record *itr,
379 				struct auxtrace_info_event *info)
380 {
381 	u32 increment;
382 	u64 magic;
383 	struct cs_etm_recording *ptr =
384 			container_of(itr, struct cs_etm_recording, itr);
385 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
386 
387 	/* first see what kind of tracer this cpu is affined to */
388 	if (cs_etm_is_etmv4(itr, cpu)) {
389 		magic = __perf_cs_etmv4_magic;
390 		/* Get trace configuration register */
391 		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
392 						cs_etmv4_get_config(itr);
393 		/* Get traceID from the framework */
394 		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
395 						coresight_get_trace_id(cpu);
396 		/* Get read-only information from sysFS */
397 		info->priv[*offset + CS_ETMV4_TRCIDR0] =
398 			cs_etm_get_ro(cs_etm_pmu, cpu,
399 				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
400 		info->priv[*offset + CS_ETMV4_TRCIDR1] =
401 			cs_etm_get_ro(cs_etm_pmu, cpu,
402 				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
403 		info->priv[*offset + CS_ETMV4_TRCIDR2] =
404 			cs_etm_get_ro(cs_etm_pmu, cpu,
405 				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
406 		info->priv[*offset + CS_ETMV4_TRCIDR8] =
407 			cs_etm_get_ro(cs_etm_pmu, cpu,
408 				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
409 		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
410 			cs_etm_get_ro(cs_etm_pmu, cpu,
411 				      metadata_etmv4_ro
412 				      [CS_ETMV4_TRCAUTHSTATUS]);
413 
414 		/* How much space was used */
415 		increment = CS_ETMV4_PRIV_MAX;
416 	} else {
417 		magic = __perf_cs_etmv3_magic;
418 		/* Get configuration register */
419 		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
420 		/* Get traceID from the framework */
421 		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
422 						coresight_get_trace_id(cpu);
423 		/* Get read-only information from sysFS */
424 		info->priv[*offset + CS_ETM_ETMCCER] =
425 			cs_etm_get_ro(cs_etm_pmu, cpu,
426 				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
427 		info->priv[*offset + CS_ETM_ETMIDR] =
428 			cs_etm_get_ro(cs_etm_pmu, cpu,
429 				      metadata_etmv3_ro[CS_ETM_ETMIDR]);
430 
431 		/* How much space was used */
432 		increment = CS_ETM_PRIV_MAX;
433 	}
434 
435 	/* Build generic header portion */
436 	info->priv[*offset + CS_ETM_MAGIC] = magic;
437 	info->priv[*offset + CS_ETM_CPU] = cpu;
438 	/* Where the next CPU entry should start from */
439 	*offset += increment;
440 }
441 
442 static int cs_etm_info_fill(struct auxtrace_record *itr,
443 			    struct perf_session *session,
444 			    struct auxtrace_info_event *info,
445 			    size_t priv_size)
446 {
447 	int i;
448 	u32 offset;
449 	u64 nr_cpu, type;
450 	const struct cpu_map *cpus = session->evlist->cpus;
451 	struct cs_etm_recording *ptr =
452 			container_of(itr, struct cs_etm_recording, itr);
453 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
454 
455 	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
456 		return -EINVAL;
457 
458 	if (!session->evlist->nr_mmaps)
459 		return -EINVAL;
460 
461 	/* If the cpu_map is empty all CPUs are involved */
462 	nr_cpu = cpu_map__empty(cpus) ? cpu__max_cpu() : cpu_map__nr(cpus);
463 	/* Get PMU type as dynamically assigned by the core */
464 	type = cs_etm_pmu->type;
465 
466 	/* First fill out the session header */
467 	info->type = PERF_AUXTRACE_CS_ETM;
468 	info->priv[CS_HEADER_VERSION_0] = 0;
469 	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
470 	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
471 	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
472 
473 	offset = CS_ETM_SNAPSHOT + 1;
474 
475 	/* cpu map is not empty, we have specific CPUs to work with */
476 	if (!cpu_map__empty(cpus)) {
477 		for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
478 			cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
479 	} else {
480 		/* get configuration for all CPUs in the system */
481 		for (i = 0; i < cpu__max_cpu(); i++)
482 			cs_etm_get_metadata(i, &offset, itr, info);
483 	}
484 
485 	return 0;
486 }
487 
488 static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
489 				int idx, struct auxtrace_mmap *mm,
490 				unsigned char *data __maybe_unused,
491 				u64 *head, u64 *old)
492 {
493 	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
494 		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);
495 
496 	*old = *head;
497 	*head += mm->len;
498 
499 	return 0;
500 }
501 
502 static int cs_etm_snapshot_start(struct auxtrace_record *itr)
503 {
504 	struct cs_etm_recording *ptr =
505 			container_of(itr, struct cs_etm_recording, itr);
506 	struct perf_evsel *evsel;
507 
508 	evlist__for_each_entry(ptr->evlist, evsel) {
509 		if (evsel->attr.type == ptr->cs_etm_pmu->type)
510 			return perf_evsel__disable(evsel);
511 	}
512 	return -EINVAL;
513 }
514 
515 static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
516 {
517 	struct cs_etm_recording *ptr =
518 			container_of(itr, struct cs_etm_recording, itr);
519 	struct perf_evsel *evsel;
520 
521 	evlist__for_each_entry(ptr->evlist, evsel) {
522 		if (evsel->attr.type == ptr->cs_etm_pmu->type)
523 			return perf_evsel__enable(evsel);
524 	}
525 	return -EINVAL;
526 }
527 
528 static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
529 {
530 	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
531 		(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
532 }
533 
534 static void cs_etm_recording_free(struct auxtrace_record *itr)
535 {
536 	struct cs_etm_recording *ptr =
537 			container_of(itr, struct cs_etm_recording, itr);
538 	free(ptr);
539 }
540 
541 static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
542 {
543 	struct cs_etm_recording *ptr =
544 			container_of(itr, struct cs_etm_recording, itr);
545 	struct perf_evsel *evsel;
546 
547 	evlist__for_each_entry(ptr->evlist, evsel) {
548 		if (evsel->attr.type == ptr->cs_etm_pmu->type)
549 			return perf_evlist__enable_event_idx(ptr->evlist,
550 							     evsel, idx);
551 	}
552 
553 	return -EINVAL;
554 }
555 
556 struct auxtrace_record *cs_etm_record_init(int *err)
557 {
558 	struct perf_pmu *cs_etm_pmu;
559 	struct cs_etm_recording *ptr;
560 
561 	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
562 
563 	if (!cs_etm_pmu) {
564 		*err = -EINVAL;
565 		goto out;
566 	}
567 
568 	ptr = zalloc(sizeof(struct cs_etm_recording));
569 	if (!ptr) {
570 		*err = -ENOMEM;
571 		goto out;
572 	}
573 
574 	ptr->cs_etm_pmu			= cs_etm_pmu;
575 	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
576 	ptr->itr.recording_options	= cs_etm_recording_options;
577 	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
578 	ptr->itr.info_fill		= cs_etm_info_fill;
579 	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
580 	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
581 	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
582 	ptr->itr.reference		= cs_etm_reference;
583 	ptr->itr.free			= cs_etm_recording_free;
584 	ptr->itr.read_finish		= cs_etm_read_finish;
585 
586 	*err = 0;
587 	return &ptr->itr;
588 out:
589 	return NULL;
590 }
591 
592 static FILE *cs_device__open_file(const char *name)
593 {
594 	struct stat st;
595 	char path[PATH_MAX];
596 	const char *sysfs;
597 
598 	sysfs = sysfs__mountpoint();
599 	if (!sysfs)
600 		return NULL;
601 
602 	snprintf(path, PATH_MAX,
603 		 "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
604 
605 	if (stat(path, &st) < 0)
606 		return NULL;
607 
608 	return fopen(path, "w");
609 
610 }
611 
612 static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
613 {
614 	va_list args;
615 	FILE *file;
616 	int ret = -EINVAL;
617 
618 	va_start(args, fmt);
619 	file = cs_device__open_file(name);
620 	if (file) {
621 		ret = vfprintf(file, fmt, args);
622 		fclose(file);
623 	}
624 	va_end(args);
625 	return ret;
626 }
627 
628 int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
629 {
630 	int ret;
631 	char enable_sink[ENABLE_SINK_MAX];
632 
633 	snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
634 		 term->val.drv_cfg, "enable_sink");
635 
636 	ret = cs_device__print_file(enable_sink, "%d", 1);
637 	if (ret < 0)
638 		return ret;
639 
640 	return 0;
641 }
642