// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <stdbool.h>
#include <stdio.h>		/* sprintf() */
#include <stdlib.h>		/* free() */
#include <unistd.h>		/* sysconf() */
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>

#include "../../../util/auxtrace.h"
#include "../../../util/debug.h"
#include "../../../util/evlist.h"
#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"

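/*
 * Statistical Profiling Extension (SPE) PMUs show up as "arm_spe_0",
 * "arm_spe_1", ...  Probe each possible index (bounded by the number of
 * CPUs) with perf_pmu__find() and collect the PMUs that actually exist.
 */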
static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
{
	struct perf_pmu **arm_spe_pmus = NULL;
	int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	/* arm_spe_xxxxxxxxx\0 */
	char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];

	arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
	if (!arm_spe_pmus) {
		pr_err("spes alloc failed\n");
		*err = -ENOMEM;
		return NULL;
	}

	for (i = 0; i < nr_cpus; i++) {
		ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
		if (ret < 0) {
			pr_err("sprintf failed\n");
			/* Don't leak the PMU array on the error path */
			free(arm_spe_pmus);
			*err = -ENOMEM;
			return NULL;
		}

		arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
		if (arm_spe_pmus[*nr_spes]) {
			pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
				 __func__, __LINE__, *nr_spes,
				 arm_spe_pmus[*nr_spes]->type,
				 arm_spe_pmus[*nr_spes]->name);
			(*nr_spes)++;
		}
	}

	return arm_spe_pmus;
}

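/*
 * Pick the AUX area tracing backend for this event list: CoreSight ETM,
 * or (on arm64 only) the Statistical Profiling Extension.  Selecting both
 * at the same time is rejected as unsupported.
 */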
struct auxtrace_record
*auxtrace_record__init(struct evlist *evlist, int *err)
{
	struct perf_pmu	*cs_etm_pmu;
	struct evsel *evsel;
	bool found_etm = false;
	struct perf_pmu *found_spe = NULL;
	struct perf_pmu **arm_spe_pmus = NULL;
	int nr_spes = 0;
	int i = 0;

	if (!evlist)
		return NULL;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
	arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);

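	/*
	 * Walk the selected events and note whether any of them targets the
	 * ETM PMU or one of the SPE PMUs found above.
	 */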
	evlist__for_each_entry(evlist, evsel) {
		if (cs_etm_pmu &&
		    evsel->core.attr.type == cs_etm_pmu->type)
			found_etm = true;

		if (!nr_spes || found_spe)
			continue;

		for (i = 0; i < nr_spes; i++) {
			if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
				found_spe = arm_spe_pmus[i];
				break;
			}
		}
	}
	free(arm_spe_pmus);

	if (found_etm && found_spe) {
		pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
		*err = -EOPNOTSUPP;
		return NULL;
	}

	if (found_etm)
		return cs_etm_record_init(err);

#if defined(__aarch64__)
	if (found_spe)
		return arm_spe_recording_init(err, found_spe);
#endif

	/*
	 * Clear 'err' even if we haven't found an event - that way perf
	 * record can still be used even if tracers aren't present.  The NULL
	 * return value will take care of telling the infrastructure HW tracing
	 * isn't available.
	 */
	*err = 0;
	return NULL;
}

#if defined(__arm__)
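/*
 * Compat helpers for a 32-bit perf tool running against a 64-bit kernel:
 * the aux_head/aux_tail fields in the mmap page are 64 bits wide, so use
 * ldrd/strd to move both halves with a single instruction ("%H" selects
 * the high register of the 64-bit operand pair) rather than two separate
 * 32-bit accesses.
 */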
u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 result;

	__asm__ __volatile__(
"	ldrd    %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&pc->aux_head), "Qo" (pc->aux_head)
	);

	return result;
}

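/*
 * Publish the new tail so the kernel knows how much of the AUX buffer has
 * been consumed and can be reused.
 */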
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();

	__asm__ __volatile__(
"	strd    %2, %H2, [%1]"
	: "=Qo" (pc->aux_tail)
	: "r" (&pc->aux_tail), "r" (tail)
	);

	return 0;
}
#endif