// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine whether
 * we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2.
	 * When user (EL0) counting is excluded, we have no reason to
	 * switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if the attributes differ */
	return (attr->exclude_host != attr->exclude_guest);
}
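
/*
 * For example (illustrative, not taken from this file): an event
 * opened with perf's :G modifier typically has exclude_host = 1 and
 * exclude_guest = 0, so it must be switched at guest entry/exit,
 * whereas a plain event excludes neither world and needs no switch.
 * The VHE early-out above works because the entry/exit switch only
 * toggles EL0 counting (see kvm_vcpu_pmu_{enable,disable}_el0()
 * below); once EL0 is excluded anyway, the EL1/EL2 privilege filters
 * already keep guest and host counts apart.
 */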

/*
 * Return this CPU's host/guest event bookkeeping.
 */
struct kvm_pmu_events *kvm_get_pmu_events(void)
{
	return this_cpu_ptr(&kvm_pmu_events);
}

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		pmu->events_host |= set;
	if (!attr->exclude_guest)
		pmu->events_guest |= set;
}

/*
 * Stop tracking events.
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3())
		return;

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}
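
/*
 * A minimal usage sketch, assuming the host PMU driver drives these
 * hooks from its counter enable/disable paths (the function below is
 * hypothetical and not part of this file):
 *
 *	static void example_enable_event(int idx, struct perf_event *event)
 *	{
 *		// Let KVM know this counter may need entry/exit switching
 *		kvm_set_pmu_events(BIT(idx), &event->attr);
 *	}
 *
 * with a matching kvm_clr_pmu_events(BIT(idx)) when the counter is
 * released.
 */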

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
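
/*
 * For orientation, PMEVTYPER_CASES(READ) expands to the 31 labels
 *
 *	case 0:
 *		return read_sysreg(pmevtyper0_el0);
 *	...
 *	case 30:
 *		return read_sysreg(pmevtyper30_el0);
 *
 * The per-index cases exist because read_sysreg()/write_sysreg()
 * need a literal register name; these system registers have no
 * run-time-indexed accessor.
 */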

/*
 * Read a value directly from PMEVTYPER<idx>, where idx is 0-30,
 * or from PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx>, where idx is 0-30,
 * or to PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Modify ARMv8 PMU events to include EL0 counting.
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting.
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
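
/*
 * The pair above flips only the EL0 filter: ARMV8_PMU_EXCLUDE_EL0 is
 * the U bit of PMEVTYPER<n>_EL0/PMCCFILTR_EL0 (bit 30 in the Arm ARM;
 * stated here for orientation, the authoritative definition lives in
 * the arch headers). Clearing it makes the counter include EL0,
 * setting it excludes EL0, and no other filter bits are touched.
 */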

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled.
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
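
/*
 * Note the asymmetry: kvm_vcpu_pmu_restore_host() takes no preemption
 * lock of its own, the assumption being that it is only reached from
 * the vcpu_put() path, which already runs with preemption disabled
 * (verify against the callers in arch/arm64/kvm if relying on this).
 */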

/*
 * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on
 * the pCPU where PMUSERENR_EL0 for the guest is loaded, since
 * PMUSERENR_EL0 is switched to the value for the guest on vcpu_load().
 * The value for the host EL0 will be restored on vcpu_put(), before
 * returning to userspace. This isn't necessary for nVHE, as the
 * register is context switched for every guest enter/exit.
 *
 * Return true if KVM takes care of the register. Otherwise return
 * false.
 */
bool kvm_set_pmuserenr(u64 val)
{
	struct kvm_cpu_context *hctxt;
	struct kvm_vcpu *vcpu;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return false;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
		return false;

	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
	return true;
}
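
/*
 * A sketch of the expected calling convention (the helper is
 * hypothetical, named here only for illustration): the host PMU
 * driver offers the new PMUSERENR_EL0 value to KVM first and only
 * writes the register itself if KVM declines:
 *
 *	static void example_update_pmuserenr(u64 val)
 *	{
 *		if (kvm_set_pmuserenr(val))
 *			return;	// KVM restores it on vcpu_put()
 *		write_sysreg(val, pmuserenr_el0);
 *	}
 */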

/*
 * If we interrupted the guest to update the host PMU context, make
 * sure we re-apply the guest EL0 state.
 */
void kvm_vcpu_pmu_resync_el0(void)
{
	struct kvm_vcpu *vcpu;

	if (!has_vhe() || !in_interrupt())
		return;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu)
		return;

	kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
}
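
/*
 * The request made above is consumed before the vCPU next enters the
 * guest (assumption: via the run loop's request handling, which ends
 * up re-applying the guest state through kvm_vcpu_pmu_restore_guest()),
 * so the guest EL0 filter configuration survives the interruption.
 */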