xref: /openbmc/linux/arch/arm64/kvm/pmu.c (revision b664e06d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * whether we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2;
	 * if user (EL0) events are excluded there is no reason to
	 * switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if the attributes differ */
	return (attr->exclude_host != attr->exclude_guest);
}
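
/*
 * Illustrative sketch (assumed values, not taken from this file): a
 * perf "cycles:G" event would typically arrive here with
 *
 *	attr->exclude_user  == 0
 *	attr->exclude_host  == 1
 *	attr->exclude_guest == 0
 *
 * so kvm_pmu_switch_needed() returns true, whereas a plain "cycles"
 * event leaves all three flags clear and returns false.
 */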

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);

	if (!kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}
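
/*
 * A rough sketch of the expected caller in the arm64 host PMU driver
 * (the helper names here, such as ARMV8_IDX_TO_COUNTER(), are
 * assumptions for illustration):
 *
 *	static void armv8pmu_enable_event_counter(struct perf_event *event)
 *	{
 *		u32 mask = BIT(ARMV8_IDX_TO_COUNTER(event->hw.idx));
 *
 *		kvm_set_pmu_events(mask, &event->attr);
 *		...
 *	}
 *
 * i.e. "set" is a per-counter bitmask, mirrored by kvm_clr_pmu_events()
 * when the event is disabled.
 */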

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}

/*
 * Disable host events, enable guest events
 */
bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
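
/*
 * A sketch of how these helpers are driven from the world switch
 * (the exact call site in kvm/hyp/switch.c is assumed here):
 *
 *	bool pmu_switch_needed;
 *	...
 *	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
 *	...	// run the guest
 *	if (pmu_switch_needed)
 *		__pmu_switch_to_host(host_ctxt);
 *
 * The boolean return value lets the caller skip the restore path
 * entirely when no counters were switched.
 */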

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
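
/*
 * For reference, each case expands to a direct system register access;
 * PMEVTYPER_READ_CASE(5), for example, becomes:
 *
 *	case 5:
 *		return read_sysreg(pmevtyper5_el0);
 */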

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30
 * or from PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30
 * or to PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}
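
/*
 * These helpers avoid the architecture's indirect access sequence via
 * PMSELR_EL0/PMXEVTYPER_EL0. For comparison, a sketch of the indirect
 * form (assumed here, not used by this file) would be:
 *
 *	write_sysreg(idx, pmselr_el0);
 *	isb();
 *	typer = read_sysreg(pmxevtyper_el0);
 *
 * The direct accesses need no select/barrier step, at the cost of the
 * case table above.
 */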

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
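
/*
 * ARMV8_PMU_EXCLUDE_EL0 is the EL0 filter bit of PMEVTYPER<n>_EL0
 * (bit 30 in the ARMv8 PMU event filtering scheme); clearing it above
 * re-enables EL0 counting for each selected counter. A worked example
 * with an assumed event type value:
 *
 *	typer = 0x40000011;			// EL0 excluded, event 0x11
 *	typer &= ~ARMV8_PMU_EXCLUDE_EL0;	// -> 0x00000011
 */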

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	host_ctxt = vcpu->arch.host_cpu_context;
	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	host_ctxt = vcpu->arch.host_cpu_context;
	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
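
/*
 * A sketch of the expected callers (assumed here, based on how vcpu
 * load/put pairing usually works for these helpers):
 *
 *	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 *	{
 *		...
 *		kvm_vcpu_pmu_restore_guest(vcpu);
 *	}
 *
 *	void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 *	{
 *		...
 *		kvm_vcpu_pmu_restore_host(vcpu);
 *	}
 *
 * Only the EL0 filter needs flipping on each load/put because, with
 * VHE, the host kernel counts at EL2 while the guest kernel counts at
 * EL1, so kernel-level events never overlap.
 */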