xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/switch.c (revision df202b452fe6c6d6f1351bad485e2367ef1e644e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

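	/*
	 * CPTR_EL2 gates EL1/EL0 access to trace, FP/SIMD, SVE and SME.
	 * Start from the vcpu's baseline value, always trap trace (TTA)
	 * and the Activity Monitors (TAM), and leave FP/SIMD (TFP) and
	 * SVE (TZ) untrapped only while the guest's FP state is resident.
	 */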
	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		__activate_traps_fpsimd32(vcpu);
	}
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

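	/*
	 * The fine-grained trap bits below have negative polarity: clearing
	 * nTPIDR2_EL0 and nSMPRI_EL1 makes guest accesses to TPIDR2_EL0 and
	 * SMPRI_EL1 trap to EL2, since SME is not exposed to guests here.
	 */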
	if (cpus_have_final_cap(ARM64_SME)) {
		val = read_sysreg_s(SYS_HFGRTR_EL2);
		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK);
		write_sysreg_s(val, SYS_HFGRTR_EL2);

		val = read_sysreg_s(SYS_HFGWTR_EL2);
		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK);
		write_sysreg_s(val, SYS_HFGWTR_EL2);
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 cptr;

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps() (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

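	/* Restore the host's HCR_EL2 configuration saved at hyp init time. */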
	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	if (cpus_have_final_cap(ARM64_SME)) {
		u64 val;

		val = read_sysreg_s(SYS_HFGRTR_EL2);
		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
			HFGxTR_EL2_nSMPRI_EL1_MASK;
		write_sysreg_s(val, SYS_HFGRTR_EL2);

		val = read_sysreg_s(SYS_HFGWTR_EL2);
		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
			HFGxTR_EL2_nSMPRI_EL1_MASK;
		write_sysreg_s(val, SYS_HFGWTR_EL2);
	}

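	/*
	 * Reset CPTR_EL2 for the host: keep SVE (TZ) trapped while the
	 * guest's FP/SVE state is still resident, and stop trapping SME
	 * (TSM) so the host can use it again.
	 */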
	cptr = CPTR_EL2_DEFAULT;
	if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
		cptr |= CPTR_EL2_TZ;
	if (cpus_have_final_cap(ARM64_SME))
		cptr &= ~CPTR_EL2_TSM;

	write_sysreg(cptr, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events.
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

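	/*
	 * PMCNTENCLR_EL0 disables the counters whose mask bits are set and
	 * PMCNTENSET_EL0 enables them; zero bit positions are left alone,
	 * so only non-zero masks need to be written.
	 */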
	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events.
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

/*
 * Handler for protected floating-point and Advanced SIMD accesses.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Linux guests assume support for floating-point and Advanced SIMD. */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
				PVM_ID_AA64PFR0_ALLOW));

	return kvm_hyp_handle_fpsimd(vcpu, exit_code);
}

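/*
 * Exit handlers, indexed by ESR_ELx exception class. A NULL entry means the
 * exit is not handled at EL2 and follows the normal exit path to the host.
 */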
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

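/*
 * Protected VMs get stricter handling: AArch32 (CP15) traps are left
 * unhandled, SVE is treated as restricted, and sysreg accesses go through
 * the common handlers before the pKVM checks, which may inject an UNDEF.
 */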
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_handle_pvm_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled, and don't
 * trust the host to spot or fix it. The check below is based on the one in
 * kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the vcpu is invalidated
 * and the exit code is rewritten so that the run loop exits to the host.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu->arch.target = -1;
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

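	/*
	 * Load the guest's stage-2 context before activating the traps:
	 * the SPECULATIVE_AT workaround in __activate_traps() relies on
	 * stage-2 already being configured and enabled.
	 */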
	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

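	/*
	 * If the guest's FP/SIMD state is still live, save FPEXC32_EL2,
	 * the AArch64 view of the AArch32 FPEXC register, before handing
	 * the CPU back to the host.
	 */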
	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

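	/*
	 * If the panic hit while a guest was running, restore enough of the
	 * host's context for __hyp_do_panic() to run in a sane environment.
	 */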
	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}