xref: /openbmc/linux/arch/arm64/kvm/hyp/include/hyp/fault.h (revision b0803ba7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_FAULT_H__
#define __ARM64_KVM_HYP_FAULT_H__

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}
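
/*
 * Illustrative note, not part of the original header: assuming the usual
 * register layouts, PAR_EL1 reports the output address of a successful AT
 * lookup in bits [47:12], while HPFAR_EL2.FIPA carries IPA[47:12] in bits
 * [39:4]. Under that assumption, PAR_TO_HPFAR() boils down to a
 * mask-and-shift, roughly:
 *
 *	hpfar = (par & GENMASK_ULL(47, 12)) >> 8;
 */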

static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar, far;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor is affected by erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	fault->far_el2 = far;
	fault->hpfar_el2 = hpfar;
	return true;
}
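
/*
 * Usage sketch, not part of the original header: the hyp switch code is
 * expected to feed ESR_EL2 and the vcpu's fault record into this helper
 * (a false return means the translation failed and the guest should be
 * resumed), and to later recover the faulting IPA page by shifting the
 * saved HPFAR_EL2 FIPA field back into place. Assuming the usual
 * vcpu->arch.fault record and HPFAR_MASK definition, that looks roughly
 * like:
 *
 *	if (!__get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault))
 *		return true;
 *	ipa = (vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 */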

#endif