xref: /openbmc/linux/arch/arm64/include/asm/kvm_asm.h (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1caab277bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2fd9fc9f7SMarc Zyngier /*
3fd9fc9f7SMarc Zyngier  * Copyright (C) 2012,2013 - ARM Ltd
4fd9fc9f7SMarc Zyngier  * Author: Marc Zyngier <marc.zyngier@arm.com>
5fd9fc9f7SMarc Zyngier  */
6fd9fc9f7SMarc Zyngier 
7fd9fc9f7SMarc Zyngier #ifndef __ARM_KVM_ASM_H__
8fd9fc9f7SMarc Zyngier #define __ARM_KVM_ASM_H__
9fd9fc9f7SMarc Zyngier 
10ce492a16SDavid Brazdil #include <asm/hyp_image.h>
113e00e39dSMark Rutland #include <asm/insn.h>
1245451914SMarc Zyngier #include <asm/virt.h>
1345451914SMarc Zyngier 
1420163403SMarc Zyngier #define ARM_EXIT_WITH_SERROR_BIT  31
1520163403SMarc Zyngier #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
1658466766SMarc Zyngier #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
1720163403SMarc Zyngier #define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
1820163403SMarc Zyngier 
19fd9fc9f7SMarc Zyngier #define ARM_EXCEPTION_IRQ	  0
209aecafc8SMarc Zyngier #define ARM_EXCEPTION_EL1_SERROR  1
219aecafc8SMarc Zyngier #define ARM_EXCEPTION_TRAP	  2
22e4e11cc0SChristoffer Dall #define ARM_EXCEPTION_IL	  3
23c94b0cf2SJames Morse /* The hyp-stub will return this for any kvm_call_hyp() call */
244993fdcfSMarc Zyngier #define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
25fd9fc9f7SMarc Zyngier 
2671a7e47fSChristoffer Dall #define kvm_arm_exception_type					\
2771a7e47fSChristoffer Dall 	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
2871a7e47fSChristoffer Dall 	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
2971a7e47fSChristoffer Dall 	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
3071a7e47fSChristoffer Dall 	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
3171a7e47fSChristoffer Dall 
323dbf100bSJames Morse /*
333dbf100bSJames Morse  * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
343dbf100bSJames Morse  * that jumps over this.
353dbf100bSJames Morse  */
360e5b9c08SJames Morse #define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
373dbf100bSJames Morse 
/*
 * Host->hyp calls are encoded as fast SMC64 calls in the vendor-specific
 * hypervisor service range; KVM_HOST_SMCCC_FUNC() builds the full SMCCC
 * function ID from the name of a __kvm_host_smccc_func enumerator.
 * __kvm_hyp_init's ID is #defined here, before the !__ASSEMBLY__ section
 * below, so that assembly code can use it as well.
 */
3805469831SAndrew Scull #define KVM_HOST_SMCCC_ID(id)						\
3905469831SAndrew Scull 	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
4005469831SAndrew Scull 			   ARM_SMCCC_SMC_64,				\
4105469831SAndrew Scull 			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
4205469831SAndrew Scull 			   (id))
4305469831SAndrew Scull 
4405469831SAndrew Scull #define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
4505469831SAndrew Scull 
4605469831SAndrew Scull #define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
4705469831SAndrew Scull 
4846c4a30bSMark Rutland #ifndef __ASSEMBLY__
4946c4a30bSMark Rutland 
5046c4a30bSMark Rutland #include <linux/mm.h>
510c557ed4SMarc Zyngier 
/*
 * IDs of the host->hyp hypercalls, turned into full SMCCC function IDs
 * by KVM_HOST_SMCCC_FUNC().  __kvm_hyp_init is 0 (it is #defined above,
 * outside the !__ASSEMBLY__ block, so assembly can use it too) and
 * therefore only appears here as a comment; the enum starts counting
 * from __kvm_hyp_init + 1.
 */
52a78738edSMarc Zyngier enum __kvm_host_smccc_func {
53057bed20SWill Deacon 	/* Hypercalls available only prior to pKVM finalisation */
54a78738edSMarc Zyngier 	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
55057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
56057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_init,
57057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
58057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
59057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
60057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
61057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
62057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
63057bed20SWill Deacon 
64057bed20SWill Deacon 	/* Hypercalls available after pKVM finalisation */
65057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
66b8cc6eb5SWill Deacon 	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
67057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
68057bed20SWill Deacon 	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
69a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
70a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
71a12ab137SMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
72a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
73*6354d150SRaghavendra Rao Ananta 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
74a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
75a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
76a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
77a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
78a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
79a78738edSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
80be08c3cfSMarc Zyngier 	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
81a1ec5c70SFuad Tabba 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
82a1ec5c70SFuad Tabba 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
83a1ec5c70SFuad Tabba 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
84a78738edSMarc Zyngier };
85a78738edSMarc Zyngier 
/*
 * nVHE hyp symbols live in a separate namespace created by
 * kvm_nvhe_sym() (from asm/hyp_image.h), hence the distinct VHE/NVHE
 * declaration helpers for both plain symbols and per-cpu variables.
 */
86f50b6f6aSAndrew Scull #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
87f50b6f6aSAndrew Scull #define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
88f50b6f6aSAndrew Scull 
89f50b6f6aSAndrew Scull /*
90f50b6f6aSAndrew Scull  * Define a pair of symbols sharing the same name but one defined in
91f50b6f6aSAndrew Scull  * VHE and the other in nVHE hyp implementations.
92f50b6f6aSAndrew Scull  */
93f50b6f6aSAndrew Scull #define DECLARE_KVM_HYP_SYM(sym)		\
94f50b6f6aSAndrew Scull 	DECLARE_KVM_VHE_SYM(sym);		\
95f50b6f6aSAndrew Scull 	DECLARE_KVM_NVHE_SYM(sym)
96f50b6f6aSAndrew Scull 
97*57249499SDavid Brazdil #define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
98*57249499SDavid Brazdil 	DECLARE_PER_CPU(type, sym)
99*57249499SDavid Brazdil #define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
100*57249499SDavid Brazdil 	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
101*57249499SDavid Brazdil 
102*57249499SDavid Brazdil #define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
103*57249499SDavid Brazdil 	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
104*57249499SDavid Brazdil 	DECLARE_KVM_NVHE_PER_CPU(type, sym)
10557249499SDavid Brazdil 
10630c95391SDavid Brazdil /*
10730c95391SDavid Brazdil  * Compute pointer to a symbol defined in nVHE percpu region.
10830c95391SDavid Brazdil  * Returns NULL if percpu memory has not been allocated yet.
10930c95391SDavid Brazdil  */
/*
 * base is the CPU's copy of the nVHE per-cpu region, recorded in
 * kvm_arm_hyp_percpu_base[] (0 until allocated); off is the symbol's
 * offset from the start of the nVHE __per_cpu section.
 */
11030c95391SDavid Brazdil #define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
11130c95391SDavid Brazdil #define per_cpu_ptr_nvhe_sym(sym, cpu)						\
11230c95391SDavid Brazdil 	({									\
11330c95391SDavid Brazdil 		unsigned long base, off;					\
114fe41a7f8SQuentin Perret 		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
11530c95391SDavid Brazdil 		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
11630c95391SDavid Brazdil 		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
11730c95391SDavid Brazdil 		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
11830c95391SDavid Brazdil 	})
11957249499SDavid Brazdil 
/*
 * Resolve CHOOSE_{VHE,NVHE,HYP}_SYM() and the hyp per-cpu accessors
 * according to which object is being built: the nVHE hyp object, the
 * VHE hyp object, or the host kernel (which must pick at runtime).
 */
120ceee2fe4SAndrew Scull #if defined(__KVM_NVHE_HYPERVISOR__)
1216de7dd31SMarc Zyngier 
122ceee2fe4SAndrew Scull #define CHOOSE_NVHE_SYM(sym)	sym
12314ef9d04SMarc Zyngier #define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
12414ef9d04SMarc Zyngier 
125ceee2fe4SAndrew Scull /* The nVHE hypervisor shouldn't even try to access VHE symbols */
126ceee2fe4SAndrew Scull extern void *__nvhe_undefined_symbol;
127ceee2fe4SAndrew Scull #define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
12814ef9d04SMarc Zyngier #define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
12914ef9d04SMarc Zyngier #define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)
130ceee2fe4SAndrew Scull 
13114ef9d04SMarc Zyngier #elif defined(__KVM_VHE_HYPERVISOR__)
132ceee2fe4SAndrew Scull 
133ceee2fe4SAndrew Scull #define CHOOSE_VHE_SYM(sym)	sym
13414ef9d04SMarc Zyngier #define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
13514ef9d04SMarc Zyngier 
136ceee2fe4SAndrew Scull /* The VHE hypervisor shouldn't even try to access nVHE symbols */
137ceee2fe4SAndrew Scull extern void *__vhe_undefined_symbol;
138ceee2fe4SAndrew Scull #define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
13914ef9d04SMarc Zyngier #define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
14014ef9d04SMarc Zyngier #define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)
141ceee2fe4SAndrew Scull 
142ceee2fe4SAndrew Scull #else
143ceee2fe4SAndrew Scull 
1446de7dd31SMarc Zyngier /*
1456de7dd31SMarc Zyngier  * BIG FAT WARNINGS:
1466de7dd31SMarc Zyngier  *
1476de7dd31SMarc Zyngier  * - Don't be tempted to change the following is_kernel_in_hyp_mode()
1486de7dd31SMarc Zyngier  *   to has_vhe(). has_vhe() is implemented as a *final* capability,
1496de7dd31SMarc Zyngier  *   while this is used early at boot time, when the capabilities are
1506de7dd31SMarc Zyngier  *   not final yet....
1516de7dd31SMarc Zyngier  *
1526de7dd31SMarc Zyngier  * - Don't let the nVHE hypervisor have access to this, as it will
1536de7dd31SMarc Zyngier  *   pick the *wrong* symbol (yes, it runs at EL2...).
1546de7dd31SMarc Zyngier  */
15557249499SDavid Brazdil #define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
15657249499SDavid Brazdil 					   ? CHOOSE_VHE_SYM(sym)	\
157b877e984SDavid Brazdil 					   : CHOOSE_NVHE_SYM(sym))
15814ef9d04SMarc Zyngier 
15957249499SDavid Brazdil #define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
16057249499SDavid Brazdil 					   ? this_cpu_ptr(&sym)		\
16157249499SDavid Brazdil 					   : this_cpu_ptr_nvhe_sym(sym))
16214ef9d04SMarc Zyngier 
16357249499SDavid Brazdil #define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
16457249499SDavid Brazdil 					   ? per_cpu_ptr(&sym, cpu)	\
16557249499SDavid Brazdil 					   : per_cpu_ptr_nvhe_sym(sym, cpu))
16614ef9d04SMarc Zyngier 
167ceee2fe4SAndrew Scull #define CHOOSE_VHE_SYM(sym)	sym
168ceee2fe4SAndrew Scull #define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
169ceee2fe4SAndrew Scull 
1706de7dd31SMarc Zyngier #endif
171b877e984SDavid Brazdil 
/*
 * Parameters handed to the nVHE hyp init code (__kvm_hyp_init).
 * NOTE(review): field meanings inferred from their names — the *_el2
 * fields look like values to be programmed into the corresponding EL2
 * system registers, stack_hyp_va/stack_pa describe the hyp stack, and
 * pgd_pa/vttbr/vtcr the page tables; confirm against the hyp init code
 * that consumes this structure.
 */
17263fec243SDavid Brazdil struct kvm_nvhe_init_params {
173d3e1086cSDavid Brazdil 	unsigned long mair_el2;
174d3e1086cSDavid Brazdil 	unsigned long tcr_el2;
17563fec243SDavid Brazdil 	unsigned long tpidr_el2;
17663fec243SDavid Brazdil 	unsigned long stack_hyp_va;
177ce335431SKalesh Singh 	unsigned long stack_pa;
17863fec243SDavid Brazdil 	phys_addr_t pgd_pa;
179734864c1SQuentin Perret 	unsigned long hcr_el2;
180734864c1SQuentin Perret 	unsigned long vttbr;
181734864c1SQuentin Perret 	unsigned long vtcr;
18263fec243SDavid Brazdil };
18363fec243SDavid Brazdil 
184879e5ac7SKalesh Singh /*
185879e5ac7SKalesh Singh  * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
186879e5ac7SKalesh Singh  * hyp_panic() in non-protected mode.
187879e5ac7SKalesh Singh  *
188879e5ac7SKalesh Singh  * @stack_base:                 hyp VA of the hyp_stack base.
189879e5ac7SKalesh Singh  * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
190879e5ac7SKalesh Singh  * @fp:                         hyp FP where the backtrace begins.
191879e5ac7SKalesh Singh  * @pc:                         hyp PC where the backtrace begins.
192879e5ac7SKalesh Singh  */
/*
 * NOTE(review): all members are hyp VAs, so the host-side unwinder
 * presumably must translate them before dereferencing — confirm
 * against the host stacktrace consumer.
 */
193879e5ac7SKalesh Singh struct kvm_nvhe_stacktrace_info {
194879e5ac7SKalesh Singh 	unsigned long stack_base;
195879e5ac7SKalesh Singh 	unsigned long overflow_stack_base;
196879e5ac7SKalesh Singh 	unsigned long fp;
197879e5ac7SKalesh Singh 	unsigned long pc;
198879e5ac7SKalesh Singh };
199879e5ac7SKalesh Singh 
200b877e984SDavid Brazdil /* Translate a kernel address @ptr into its equivalent linear mapping */
/*
 * When the kernel itself runs in hyp mode (VHE) the address is usable
 * as-is; otherwise the symbol is referenced through its linear-map
 * alias (lm_alias).
 */
201b877e984SDavid Brazdil #define kvm_ksym_ref(ptr)						\
2022510ffe1SMarc Zyngier 	({								\
203b877e984SDavid Brazdil 		void *val = (ptr);					\
2042510ffe1SMarc Zyngier 		if (!is_kernel_in_hyp_mode())				\
205b877e984SDavid Brazdil 			val = lm_alias((ptr));				\
2062510ffe1SMarc Zyngier 		val;							\
2072510ffe1SMarc Zyngier 	 })
208f50b6f6aSAndrew Scull #define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
209a0bf9776SArd Biesheuvel 
210fd9fc9f7SMarc Zyngier struct kvm;
211fd9fc9f7SMarc Zyngier struct kvm_vcpu;
212a0e50aa3SChristoffer Dall struct kvm_s2_mmu;
213fd9fc9f7SMarc Zyngier 
214208243c7SAndrew Scull DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
215b877e984SDavid Brazdil DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
216208243c7SAndrew Scull #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
217b877e984SDavid Brazdil #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
218b877e984SDavid Brazdil 
219fe41a7f8SQuentin Perret extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
22030c95391SDavid Brazdil DECLARE_KVM_NVHE_SYM(__per_cpu_start);
22130c95391SDavid Brazdil DECLARE_KVM_NVHE_SYM(__per_cpu_end);
22230c95391SDavid Brazdil 
223b877e984SDavid Brazdil DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
224b877e984SDavid Brazdil #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
225fd9fc9f7SMarc Zyngier 
226fd9fc9f7SMarc Zyngier extern void __kvm_flush_vm_context(void);
22701dc9262SMarc Zyngier extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
228efaa5b93SMarc Zyngier extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
229efaa5b93SMarc Zyngier 				     int level);
230a12ab137SMarc Zyngier extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
231a12ab137SMarc Zyngier 					 phys_addr_t ipa,
232a12ab137SMarc Zyngier 					 int level);
233*6354d150SRaghavendra Rao Ananta extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
234*6354d150SRaghavendra Rao Ananta 					phys_addr_t start, unsigned long pages);
235a0e50aa3SChristoffer Dall extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
236fd9fc9f7SMarc Zyngier 
237c6fe89ffSMarc Zyngier extern void __kvm_timer_set_cntvoff(u64 cntvoff);
238688c50aaSChristoffer Dall 
23909cf57ebSDavid Brazdil extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
2401a9b1305SMarc Zyngier 
241f5e30680SMarc Zyngier extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
242f5e30680SMarc Zyngier 
243b9d699e2SMarc Zyngier extern u64 __vgic_v3_get_gic_config(void);
244328e5664SChristoffer Dall extern u64 __vgic_v3_read_vmcr(void);
245328e5664SChristoffer Dall extern void __vgic_v3_write_vmcr(u32 vmcr);
2460d98d00bSMarc Zyngier extern void __vgic_v3_init_lrs(void);
247b2fb1c0dSMarc Zyngier 
248d6c850ddSFuad Tabba extern u64 __kvm_get_mdcr_el2(void);
24956c7f5e7SAlex Bennée 
/*
 * Emit one entry into the __kvm_ex_table section: a pair of 32-bit
 * offsets, each relative to the entry itself, locating the potentially
 * faulting instruction (@from) and its fixup (@to).
 */
25088a84cccSJames Morse #define __KVM_EXTABLE(from, to)						\
25188a84cccSJames Morse 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
25288a84cccSJames Morse 	"	.align		3\n"					\
25388a84cccSJames Morse 	"	.long		(" #from " - .), (" #to " - .)\n"	\
25488a84cccSJames Morse 	"	.popsection\n"
25588a84cccSJames Morse 
25688a84cccSJames Morse 
/*
 * Perform an AT (address translation) instruction, returning -EFAULT
 * instead of taking an unhandled exception if the AT itself faults.
 * SPSR_EL2/ELR_EL2 are saved up front and restored on the fixup path
 * (label 2), since taking the exception clobbers them; on success the
 * macro evaluates to 0 and the caller reads the translation result
 * from PAR_EL1.
 */
25788a84cccSJames Morse #define __kvm_at(at_op, addr)						\
25888a84cccSJames Morse ( { 									\
25988a84cccSJames Morse 	int __kvm_at_err = 0;						\
26088a84cccSJames Morse 	u64 spsr, elr;							\
26188a84cccSJames Morse 	asm volatile(							\
26288a84cccSJames Morse 	"	mrs	%1, spsr_el2\n"					\
26388a84cccSJames Morse 	"	mrs	%2, elr_el2\n"					\
26488a84cccSJames Morse 	"1:	at	"at_op", %3\n"					\
26588a84cccSJames Morse 	"	isb\n"							\
26688a84cccSJames Morse 	"	b	9f\n"						\
26788a84cccSJames Morse 	"2:	msr	spsr_el2, %1\n"					\
26888a84cccSJames Morse 	"	msr	elr_el2, %2\n"					\
26988a84cccSJames Morse 	"	mov	%w0, %4\n"					\
27088a84cccSJames Morse 	"9:\n"								\
27188a84cccSJames Morse 	__KVM_EXTABLE(1b, 2b)						\
27288a84cccSJames Morse 	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
27388a84cccSJames Morse 	: "r" (addr), "i" (-EFAULT));					\
27488a84cccSJames Morse 	__kvm_at_err;							\
27588a84cccSJames Morse } )
27688a84cccSJames Morse 
/*
 * Prototypes for entry points implemented in the hyp object or reached
 * from assembly, collected here so that C callers (and
 * -Wmissing-prototypes) see a declaration.  hyp_panic() and
 * kvm_unexpected_el2_exception() were each declared twice; keep a
 * single (asmlinkage) declaration of each.
 */
27805d557a5SArnd Bergmann asmlinkage void kvm_unexpected_el2_exception(void);
27905d557a5SArnd Bergmann asmlinkage void __noreturn hyp_panic(void);
28005d557a5SArnd Bergmann asmlinkage void __noreturn hyp_panic_bad_stack(void);
28205d557a5SArnd Bergmann struct kvm_cpu_context;
28305d557a5SArnd Bergmann void handle_trap(struct kvm_cpu_context *host_ctxt);
28401b94b0fSArnd Bergmann asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
28505d557a5SArnd Bergmann void __noreturn __pkvm_init_finalise(void);
28605d557a5SArnd Bergmann void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
28705d557a5SArnd Bergmann void kvm_patch_vector_branch(struct alt_instr *alt,
28805d557a5SArnd Bergmann 	__le32 *origptr, __le32 *updptr, int nr_inst);
28905d557a5SArnd Bergmann void kvm_get_kimage_voffset(struct alt_instr *alt,
29005d557a5SArnd Bergmann 	__le32 *origptr, __le32 *updptr, int nr_inst);
29105d557a5SArnd Bergmann void kvm_compute_final_ctr_el0(struct alt_instr *alt,
29205d557a5SArnd Bergmann 	__le32 *origptr, __le32 *updptr, int nr_inst);
29305d557a5SArnd Bergmann void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
29405d557a5SArnd Bergmann 	u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);
29588a84cccSJames Morse 
2964464e210SChristoffer Dall #else /* __ASSEMBLY__ */
2974464e210SChristoffer Dall 
/*
 * \reg := pointer to this CPU's host context, i.e. per-cpu
 * kvm_host_data plus the HOST_DATA_CONTEXT offset.  \tmp is clobbered.
 */
29885478babSMarc Zyngier .macro get_host_ctxt reg, tmp
299ea391027SDavid Brazdil 	adr_this_cpu \reg, kvm_host_data, \tmp
300630a1685SAndrew Murray 	add	\reg, \reg, #HOST_DATA_CONTEXT
30185478babSMarc Zyngier .endm
30285478babSMarc Zyngier 
/*
 * \vcpu := the vcpu pointer stored at HOST_CONTEXT_VCPU inside this
 * CPU's host context; \ctxt ends up holding the host context pointer.
 */
3034464e210SChristoffer Dall .macro get_vcpu_ptr vcpu, ctxt
3044464e210SChristoffer Dall 	get_host_ctxt \ctxt, \vcpu
3054464e210SChristoffer Dall 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
3064464e210SChristoffer Dall .endm
3074464e210SChristoffer Dall 
/*
 * \vcpu := the vcpu pointer recorded in this CPU's kvm_hyp_ctxt
 * (HOST_CONTEXT_VCPU slot); \ctxt receives the kvm_hyp_ctxt pointer.
 */
3087db21530SAndrew Scull .macro get_loaded_vcpu vcpu, ctxt
30914ef9d04SMarc Zyngier 	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
3107db21530SAndrew Scull 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
3117db21530SAndrew Scull .endm
3127db21530SAndrew Scull 
/*
 * Record \vcpu in this CPU's kvm_hyp_ctxt (HOST_CONTEXT_VCPU slot).
 * \ctxt and \tmp are clobbered.
 */
3137db21530SAndrew Scull .macro set_loaded_vcpu vcpu, ctxt, tmp
31414ef9d04SMarc Zyngier 	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
3157db21530SAndrew Scull 	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
3167db21530SAndrew Scull .endm
3177db21530SAndrew Scull 
318e9ee186bSJames Morse /*
319e9ee186bSJames Morse  * KVM extable for unexpected exceptions.
320ae2b2f33SMark Rutland  * Create a struct kvm_exception_table_entry output to a section that can be
321ae2b2f33SMark Rutland  * mapped by EL2. The table is not sorted.
322ae2b2f33SMark Rutland  *
 * Each entry is a pair of 32-bit offsets relative to the entry itself
 * (hence the 8-byte .align 3), mirroring the C-side __KVM_EXTABLE().
 *
323ae2b2f33SMark Rutland  * The caller must ensure:
324e9ee186bSJames Morse  * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
325e9ee186bSJames Morse  * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
326e9ee186bSJames Morse  */
327e9ee186bSJames Morse .macro	_kvm_extable, from, to
328e9ee186bSJames Morse 	.pushsection	__kvm_ex_table, "a"
329e9ee186bSJames Morse 	.align		3
330e9ee186bSJames Morse 	.long		(\from - .), (\to - .)
331e9ee186bSJames Morse 	.popsection
332e9ee186bSJames Morse .endm
333e9ee186bSJames Morse 
/*
 * Byte offsets of the saved general-purpose registers (and SP_EL0, which
 * sits right after x30/lr) within a cpu context's CPU_USER_PT_REGS area.
 * The macro argument is parenthesized so non-trivial expressions expand
 * correctly (previously `8*x` would mis-expand e.g. `a + b`).
 */
334603d2bdaSAndrew Scull #define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*(x))
335603d2bdaSAndrew Scull #define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
336603d2bdaSAndrew Scull #define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
337603d2bdaSAndrew Scull 
338603d2bdaSAndrew Scull /*
339603d2bdaSAndrew Scull  * We treat x18 as callee-saved as the host may use it as a platform
340603d2bdaSAndrew Scull  * register (e.g. for shadow call stack).
341603d2bdaSAndrew Scull  */
/* Store x18-x29 and lr (x30) into the context at \ctxt. */
342603d2bdaSAndrew Scull .macro save_callee_saved_regs ctxt
343603d2bdaSAndrew Scull 	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
344603d2bdaSAndrew Scull 	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
345603d2bdaSAndrew Scull 	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
346603d2bdaSAndrew Scull 	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
347603d2bdaSAndrew Scull 	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
348603d2bdaSAndrew Scull 	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
349603d2bdaSAndrew Scull 	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
350603d2bdaSAndrew Scull .endm
351603d2bdaSAndrew Scull 
/* Reload x18-x29 and lr from the context at \ctxt. */
352603d2bdaSAndrew Scull .macro restore_callee_saved_regs ctxt
353603d2bdaSAndrew Scull 	// We require \ctxt is not x18-x28
354603d2bdaSAndrew Scull 	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
355603d2bdaSAndrew Scull 	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
356603d2bdaSAndrew Scull 	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
357603d2bdaSAndrew Scull 	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
358603d2bdaSAndrew Scull 	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
359603d2bdaSAndrew Scull 	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
360603d2bdaSAndrew Scull 	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
361603d2bdaSAndrew Scull .endm
362603d2bdaSAndrew Scull 
/* Save SP_EL0 into the context at \ctxt; \tmp is clobbered. */
363603d2bdaSAndrew Scull .macro save_sp_el0 ctxt, tmp
364603d2bdaSAndrew Scull 	mrs	\tmp,	sp_el0
365603d2bdaSAndrew Scull 	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
366603d2bdaSAndrew Scull .endm
367603d2bdaSAndrew Scull 
/* Restore SP_EL0 from the context at \ctxt; \tmp is clobbered. */
368603d2bdaSAndrew Scull .macro restore_sp_el0 ctxt, tmp
369603d2bdaSAndrew Scull 	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
370603d2bdaSAndrew Scull 	msr	sp_el0, \tmp
371603d2bdaSAndrew Scull .endm
372603d2bdaSAndrew Scull 
373fd9fc9f7SMarc Zyngier #endif
374fd9fc9f7SMarc Zyngier 
375fd9fc9f7SMarc Zyngier #endif /* __ARM_KVM_ASM_H__ */
376