1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
4 */
5
6#include <linux/linkage.h>
7
/*
 * __efi_rt_asm_wrapper(func, x1, arg0, arg1, arg2, arg3, arg4)
 *
 * In:   x0      = pointer to the EFI runtime service to invoke
 *       x1      = opaque value preserved across the firmware call and
 *                 passed (in x1) to efi_handle_corrupted_x18() if x18
 *                 was clobbered — presumably the service's name string;
 *                 confirm against the C caller
 *       x2 - x6 = up to five arguments for the firmware routine
 * Out:  x0      = value returned by the firmware routine
 *
 * Switches onto the dedicated EFI runtime stack (efi_rt_stack_top),
 * invokes the firmware routine, switches back, and verifies that the
 * firmware left the AAPCS platform register x18 intact.
 */
SYM_FUNC_START(__efi_rt_asm_wrapper)
	stp	x29, x30, [sp, #-112]!	// frame record + 96-byte register save area
	mov	x29, sp

	/*
	 * Register x18 is designated as the 'platform' register by the AAPCS,
	 * which means firmware running at the same exception level as the OS
	 * (such as UEFI) should never touch it.
	 */
	stp	x1, x18, [sp, #16]	// keep x1 and the entry value of x18 for the check below

	/*
	 * Preserve all callee saved registers and preserve the stack pointer
	 * value at the base of the EFI runtime stack so we can recover from
	 * synchronous exceptions occurring while executing the firmware
	 * routines.
	 */
	stp	x19, x20, [sp, #32]	// offsets 32..96 are mirrored by __efi_rt_asm_recover
	stp	x21, x22, [sp, #48]
	stp	x23, x24, [sp, #64]
	stp	x25, x26, [sp, #80]
	stp	x27, x28, [sp, #96]

	ldr_l	x16, efi_rt_stack_top	// switch to the dedicated EFI runtime stack
	mov	sp, x16
	stp	x18, x29, [sp, #-16]!	// top of EFI stack: {x18, frame pointer} for recovery/SCS reload

	/*
	 * We are lucky enough that no EFI runtime services take more than
	 * 5 arguments, so all are passed in registers rather than via the
	 * stack.
	 */
	mov	x8, x0			// x8 = firmware routine to call
	mov	x0, x2			// shift the five service arguments down into x0-x4
	mov	x1, x3
	mov	x2, x4
	mov	x3, x5
	mov	x4, x6
	blr	x8			// call into the firmware

	mov	sp, x29			// back onto the task stack (frame pointer saved above)
	ldp	x1, x2, [sp, #16]	// x1 = saved x1, x2 = value of x18 on entry
	cmp	x2, x18			// did the firmware corrupt x18?
	ldp	x29, x30, [sp], #112
	b.ne	0f
	ret				// common case: x18 intact, return firmware result in x0
0:
	/*
	 * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
	 * shadow stack pointer, which we need to restore before returning to
	 * potentially instrumented code. This is safe because the wrapper is
	 * called with preemption disabled and a separate shadow stack is used
	 * for interrupts.
	 */
#ifdef CONFIG_SHADOW_CALL_STACK
	ldr_l	x18, efi_rt_stack_top
	ldr	x18, [x18, #-16]	// reload the x18 slot pushed by the stp above
#endif

	b	efi_handle_corrupted_x18	// tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)
69
/*
 * Recovery path for synchronous exceptions taken while running a
 * firmware routine on the EFI runtime stack.
 *
 * NOTE(review): this is entered from the exception fixup code (not
 * visible in this file), which is expected to load x30 with the
 * wrapper's saved frame pointer — i.e. the task-stack frame created by
 * __efi_rt_asm_wrapper — before branching here; confirm against the
 * fault handler.
 */
SYM_CODE_START(__efi_rt_asm_recover)
	mov	sp, x30			// abandon the EFI stack; sp = wrapper's frame base

	ldp	x19, x20, [sp, #32]	// restore callee-saved regs from the wrapper's
	ldp	x21, x22, [sp, #48]	// save area (offsets match the stp sequence there)
	ldp	x23, x24, [sp, #64]
	ldp	x25, x26, [sp, #80]
	ldp	x27, x28, [sp, #96]
	ldp	x29, x30, [sp], #112	// pop the frame record; return to the wrapper's caller
	ret
SYM_CODE_END(__efi_rt_asm_recover)
81