xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision cb325ddd)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32	.macro	clear_gp_regs
33	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
34	mov	x\n, xzr
35	.endr
36	.endm
37
38	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
39	.align 7
40.Lventry_start\@:
41	.if	\el == 0
42	/*
43	 * This must be the first instruction of the EL0 vector entries. It is
44	 * skipped by the trampoline vectors, to trigger the cleanup.
45	 */
46	b	.Lskip_tramp_vectors_cleanup\@
47	.if	\regsize == 64
48	mrs	x30, tpidrro_el0
49	msr	tpidrro_el0, xzr
50	.else
51	mov	x30, xzr
52	.endif
53.Lskip_tramp_vectors_cleanup\@:
54	.endif
55
56	sub	sp, sp, #PT_REGS_SIZE
57#ifdef CONFIG_VMAP_STACK
58	/*
59	 * Test whether the SP has overflowed, without corrupting a GPR.
60	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
61	 * should always be zero.
62	 */
63	add	sp, sp, x0			// sp' = sp + x0
64	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
65	tbnz	x0, #THREAD_SHIFT, 0f
66	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
67	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
68	b	el\el\ht\()_\regsize\()_\label
69
700:
71	/*
72	 * Either we've just detected an overflow, or we've taken an exception
73	 * while on the overflow stack. Either way, we won't return to
74	 * userspace, and can clobber EL0 registers to free up GPRs.
75	 */
76
77	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
78	msr	tpidr_el0, x0
79
80	/* Recover the original x0 value and stash it in tpidrro_el0 */
81	sub	x0, sp, x0
82	msr	tpidrro_el0, x0
83
84	/* Switch to the overflow stack */
85	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
86
87	/*
88	 * Check whether we were already on the overflow stack. This may happen
89	 * after panic() re-enables interrupts.
90	 */
91	mrs	x0, tpidr_el0			// sp of interrupted context
92	sub	x0, sp, x0			// delta with top of overflow stack
93	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
94	b.ne	__bad_stack			// no? -> bad stack pointer
95
96	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
97	sub	sp, sp, x0
98	mrs	x0, tpidrro_el0
99#endif
100	b	el\el\ht\()_\regsize\()_\label
101.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
102	.endm
103
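	/*
	 * Compute the trampoline alias of \sym:
	 *   \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text)
	 * i.e. rebase the symbol's offset within .entry.tramp.text onto the
	 * fixed trampoline mapping that remains visible while the kernel is
	 * unmapped at EL0.
	 */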
104	.macro tramp_alias, dst, sym, tmp
105	mov_q	\dst, TRAMP_VALIAS
106	adr_l	\tmp, \sym
107	add	\dst, \dst, \tmp
108	adr_l	\tmp, .entry.tramp.text
109	sub	\dst, \dst, \tmp
110	.endm
111
112	/*
113	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
114	 * them if required.
115	 */
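	/*
	 * \state is passed to the SMCCC ARCH_WORKAROUND_2 call: 1 requests
	 * the SSB mitigation on kernel entry, 0 releases it on return to
	 * userspace. The call is skipped if this CPU requires no callback or
	 * the task has TIF_SSBD set.
	 */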
116	.macro	apply_ssbd, state, tmp1, tmp2
117alternative_cb	spectre_v4_patch_fw_mitigation_enable
118	b	.L__asm_ssbd_skip\@		// Patched to NOP
119alternative_cb_end
120	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
121	cbz	\tmp2,	.L__asm_ssbd_skip\@
122	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
123	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
124	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
125	mov	w1, #\state
126alternative_cb	smccc_patch_fw_mitigation_conduit
127	nop					// Patched to SMC/HVC #0
128alternative_cb_end
129.L__asm_ssbd_skip\@:
130	.endm
131
132	/* Check for MTE asynchronous tag check faults */
133	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
134#ifdef CONFIG_ARM64_MTE
135	.arch_extension lse
136alternative_if_not ARM64_MTE
137	b	1f
138alternative_else_nop_endif
139	/*
140	 * Asynchronous tag check faults are only possible in ASYNC (2) or
141	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
142	 * set, so skip the check if it is unset.
143	 */
144	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
145	mrs_s	\tmp, SYS_TFSRE0_EL1
146	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
147	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
148	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
149	add	\ti_flags, tsk, #TSK_TI_FLAGS
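	/* stset is an LSE atomic, hence the .arch_extension lse above */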
150	stset	\tmp, [\ti_flags]
1511:
152#endif
153	.endm
154
155	/* Clear the MTE asynchronous tag check faults */
156	.macro clear_mte_async_tcf thread_sctlr
157#ifdef CONFIG_ARM64_MTE
158alternative_if ARM64_MTE
159	/* See comment in check_mte_async_tcf above. */
160	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
161	dsb	ish
162	msr_s	SYS_TFSRE0_EL1, xzr
1631:
164alternative_else_nop_endif
165#endif
166	.endm
167
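	/*
	 * Install the user tag exclusion mask (held in \mte_ctrl) into
	 * GCR_EL1, keeping RRND set for implementation-defined random tag
	 * generation.
	 */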
168	.macro mte_set_gcr, mte_ctrl, tmp
169#ifdef CONFIG_ARM64_MTE
170	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
171	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
172	msr_s	SYS_GCR_EL1, \tmp
173#endif
174	.endm
175
176	.macro mte_set_kernel_gcr, tmp, tmp2
177#ifdef CONFIG_KASAN_HW_TAGS
178alternative_cb	kasan_hw_tags_enable
179	b	1f
180alternative_cb_end
181	mov	\tmp, KERNEL_GCR_EL1
182	msr_s	SYS_GCR_EL1, \tmp
1831:
184#endif
185	.endm
186
187	.macro mte_set_user_gcr, tsk, tmp, tmp2
188#ifdef CONFIG_KASAN_HW_TAGS
189alternative_cb	kasan_hw_tags_enable
190	b	1f
191alternative_cb_end
192	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
193
194	mte_set_gcr \tmp, \tmp2
1951:
196#endif
197	.endm
198
199	.macro	kernel_entry, el, regsize = 64
200	.if	\regsize == 32
201	mov	w0, w0				// zero upper 32 bits of x0
202	.endif
203	stp	x0, x1, [sp, #16 * 0]
204	stp	x2, x3, [sp, #16 * 1]
205	stp	x4, x5, [sp, #16 * 2]
206	stp	x6, x7, [sp, #16 * 3]
207	stp	x8, x9, [sp, #16 * 4]
208	stp	x10, x11, [sp, #16 * 5]
209	stp	x12, x13, [sp, #16 * 6]
210	stp	x14, x15, [sp, #16 * 7]
211	stp	x16, x17, [sp, #16 * 8]
212	stp	x18, x19, [sp, #16 * 9]
213	stp	x20, x21, [sp, #16 * 10]
214	stp	x22, x23, [sp, #16 * 11]
215	stp	x24, x25, [sp, #16 * 12]
216	stp	x26, x27, [sp, #16 * 13]
217	stp	x28, x29, [sp, #16 * 14]
218
219	.if	\el == 0
220	clear_gp_regs
221	mrs	x21, sp_el0
222	ldr_this_cpu	tsk, __entry_task, x20
223	msr	sp_el0, tsk
224
225	/*
226	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
227	 * when scheduling.
228	 */
229	ldr	x19, [tsk, #TSK_TI_FLAGS]
230	disable_step_tsk x19, x20
231
232	/* Check for asynchronous tag check faults in user space */
233	ldr	x0, [tsk, THREAD_SCTLR_USER]
234	check_mte_async_tcf x22, x23, x0
235
236#ifdef CONFIG_ARM64_PTR_AUTH
237alternative_if ARM64_HAS_ADDRESS_AUTH
238	/*
239	 * Enable IA for in-kernel PAC if the task had it disabled. Although
240	 * this could be implemented with an unconditional MRS which would avoid
241	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
242	 *
243	 * Install the kernel IA key only if IA was enabled in the task. If IA
244	 * was disabled on kernel exit then we would have left the kernel IA
245	 * installed so there is no need to install it again.
246	 */
247	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
248	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
249	b	2f
2501:
251	mrs	x0, sctlr_el1
252	orr	x0, x0, SCTLR_ELx_ENIA
253	msr	sctlr_el1, x0
2542:
255alternative_else_nop_endif
256#endif
257
258	apply_ssbd 1, x22, x23
259
260	mte_set_kernel_gcr x22, x23
261
262	/*
263	 * Any non-self-synchronizing system register updates required for
264	 * kernel entry should be placed before this point.
265	 */
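	/*
	 * Only one ISB is needed: when ARM64_MTE is present its ISB also
	 * covers the pointer-auth case, so branch over the second one.
	 */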
266alternative_if ARM64_MTE
267	isb
268	b	1f
269alternative_else_nop_endif
270alternative_if ARM64_HAS_ADDRESS_AUTH
271	isb
272alternative_else_nop_endif
2731:
274
275	scs_load tsk
276	.else
277	add	x21, sp, #PT_REGS_SIZE
278	get_current_task tsk
279	.endif /* \el == 0 */
280	mrs	x22, elr_el1
281	mrs	x23, spsr_el1
282	stp	lr, x21, [sp, #S_LR]
283
284	/*
285	 * For exceptions from EL0, create a final frame record.
286	 * For exceptions from EL1, create a synthetic frame record so the
287	 * interrupted code shows up in the backtrace.
288	 */
289	.if \el == 0
290	stp	xzr, xzr, [sp, #S_STACKFRAME]
291	.else
292	stp	x29, x22, [sp, #S_STACKFRAME]
293	.endif
294	add	x29, sp, #S_STACKFRAME
295
296#ifdef CONFIG_ARM64_SW_TTBR0_PAN
297alternative_if_not ARM64_HAS_PAN
298	bl	__swpan_entry_el\el
299alternative_else_nop_endif
300#endif
301
302	stp	x22, x23, [sp, #S_PC]
303
304	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
305	.if	\el == 0
306	mov	w21, #NO_SYSCALL
307	str	w21, [sp, #S_SYSCALLNO]
308	.endif
309
310	/* Save pmr */
311alternative_if ARM64_HAS_IRQ_PRIO_MASKING
312	mrs_s	x20, SYS_ICC_PMR_EL1
313	str	x20, [sp, #S_PMR_SAVE]
314	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
315	msr_s	SYS_ICC_PMR_EL1, x20
316alternative_else_nop_endif
317
318	/* Re-enable tag checking (TCO set on exception entry) */
319#ifdef CONFIG_ARM64_MTE
320alternative_if ARM64_MTE
321	SET_PSTATE_TCO(0)
322alternative_else_nop_endif
323#endif
324
325	/*
326	 * Registers that may be useful after this macro is invoked:
327	 *
328	 * x20 - ICC_PMR_EL1
329	 * x21 - aborted SP
330	 * x22 - aborted PC
331	 * x23 - aborted PSTATE
332	 */
333	.endm
334
335	.macro	kernel_exit, el
336	.if	\el != 0
337	disable_daif
338	.endif
339
340	/* Restore pmr */
341alternative_if ARM64_HAS_IRQ_PRIO_MASKING
342	ldr	x20, [sp, #S_PMR_SAVE]
343	msr_s	SYS_ICC_PMR_EL1, x20
344	mrs_s	x21, SYS_ICC_CTLR_EL1
345	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
346	dsb	sy				// Ensure priority change is seen by redistributor
347.L__skip_pmr_sync\@:
348alternative_else_nop_endif
349
350	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
351
352#ifdef CONFIG_ARM64_SW_TTBR0_PAN
353alternative_if_not ARM64_HAS_PAN
354	bl	__swpan_exit_el\el
355alternative_else_nop_endif
356#endif
357
358	.if	\el == 0
359	ldr	x23, [sp, #S_SP]		// load return stack pointer
360	msr	sp_el0, x23
361	tst	x22, #PSR_MODE32_BIT		// native task?
362	b.eq	3f
363
364#ifdef CONFIG_ARM64_ERRATUM_845719
365alternative_if ARM64_WORKAROUND_845719
366#ifdef CONFIG_PID_IN_CONTEXTIDR
367	mrs	x29, contextidr_el1
368	msr	contextidr_el1, x29
369#else
370	msr contextidr_el1, xzr
371#endif
372alternative_else_nop_endif
373#endif
3743:
375	scs_save tsk
376
377	/* Ignore asynchronous tag check faults in the uaccess routines */
378	ldr	x0, [tsk, THREAD_SCTLR_USER]
379	clear_mte_async_tcf x0
380
381#ifdef CONFIG_ARM64_PTR_AUTH
382alternative_if ARM64_HAS_ADDRESS_AUTH
383	/*
384	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
385	 * alternatively install the user's IA. All other per-task keys and
386	 * SCTLR bits were updated on task switch.
387	 *
388	 * No kernel C function calls after this.
389	 */
390	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
391	__ptrauth_keys_install_user tsk, x0, x1, x2
392	b	2f
3931:
394	mrs	x0, sctlr_el1
395	bic	x0, x0, SCTLR_ELx_ENIA
396	msr	sctlr_el1, x0
3972:
398alternative_else_nop_endif
399#endif
400
401	mte_set_user_gcr tsk, x0, x1
402
403	apply_ssbd 0, x0, x1
404	.endif
405
406	msr	elr_el1, x21			// set up the return data
407	msr	spsr_el1, x22
408	ldp	x0, x1, [sp, #16 * 0]
409	ldp	x2, x3, [sp, #16 * 1]
410	ldp	x4, x5, [sp, #16 * 2]
411	ldp	x6, x7, [sp, #16 * 3]
412	ldp	x8, x9, [sp, #16 * 4]
413	ldp	x10, x11, [sp, #16 * 5]
414	ldp	x12, x13, [sp, #16 * 6]
415	ldp	x14, x15, [sp, #16 * 7]
416	ldp	x16, x17, [sp, #16 * 8]
417	ldp	x18, x19, [sp, #16 * 9]
418	ldp	x20, x21, [sp, #16 * 10]
419	ldp	x22, x23, [sp, #16 * 11]
420	ldp	x24, x25, [sp, #16 * 12]
421	ldp	x26, x27, [sp, #16 * 13]
422	ldp	x28, x29, [sp, #16 * 14]
423
424	.if	\el == 0
425alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
426	ldr	lr, [sp, #S_LR]
427	add	sp, sp, #PT_REGS_SIZE		// restore sp
428	eret
429alternative_else_nop_endif
430#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
431	bne	4f
432	msr	far_el1, x29
433	tramp_alias	x30, tramp_exit_native, x29
434	br	x30
4354:
436	tramp_alias	x30, tramp_exit_compat, x29
437	br	x30
438#endif
439	.else
440	ldr	lr, [sp, #S_LR]
441	add	sp, sp, #PT_REGS_SIZE		// restore sp
442
443	/* Ensure any device/NC reads complete */
444	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
445
446	eret
447	.endif
448	sb
449	.endm
450
451#ifdef CONFIG_ARM64_SW_TTBR0_PAN
452	/*
453	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
454	 * EL0, there is no need to check the state of TTBR0_EL1 since
455	 * accesses are always enabled.
456	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
457	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
458	 * user mappings.
459	 */
460SYM_CODE_START_LOCAL(__swpan_entry_el1)
461	mrs	x21, ttbr0_el1
462	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
463	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
464	b.eq	1f				// TTBR0 access already disabled
465	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
466SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
467	__uaccess_ttbr0_disable x21
4681:	ret
469SYM_CODE_END(__swpan_entry_el1)
470
471	/*
472	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
473	 * PAN bit checking.
474	 */
475SYM_CODE_START_LOCAL(__swpan_exit_el1)
476	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
477	__uaccess_ttbr0_enable x0, x1
4781:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
479	ret
480SYM_CODE_END(__swpan_exit_el1)
481
482SYM_CODE_START_LOCAL(__swpan_exit_el0)
483	__uaccess_ttbr0_enable x0, x1
484	/*
485	 * Enable errata workarounds only if returning to user. The only
486	 * workaround currently required for TTBR0_EL1 changes are for the
487	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
488	 * corruption).
489	 */
490	b	post_ttbr_update_workaround
491SYM_CODE_END(__swpan_exit_el0)
492#endif
493
494/* GPRs used by entry code */
495tsk	.req	x28		// current thread_info
496
497	.text
498
499/*
500 * Exception vectors.
501 */
502	.pushsection ".entry.text", "ax"
503
504	.align	11
505SYM_CODE_START(vectors)
506	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
507	kernel_ventry	1, t, 64, irq		// IRQ EL1t
508	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
509	kernel_ventry	1, t, 64, error		// Error EL1t
510
511	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
512	kernel_ventry	1, h, 64, irq		// IRQ EL1h
513	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
514	kernel_ventry	1, h, 64, error		// Error EL1h
515
516	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
517	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
518	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
519	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
520
521	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
522	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
523	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
524	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
525SYM_CODE_END(vectors)
526
527#ifdef CONFIG_VMAP_STACK
528SYM_CODE_START_LOCAL(__bad_stack)
529	/*
530	 * We detected an overflow in kernel_ventry, which switched to the
531	 * overflow stack. Stash the exception regs, and head to our overflow
532	 * handler.
533	 */
534
535	/* Restore the original x0 value */
536	mrs	x0, tpidrro_el0
537
538	/*
539	 * Store the original GPRs to the new stack. The original SP (minus
540	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
541	 */
542	sub	sp, sp, #PT_REGS_SIZE
543	kernel_entry 1
544	mrs	x0, tpidr_el0
545	add	x0, x0, #PT_REGS_SIZE
546	str	x0, [sp, #S_SP]
547
548	/* Stash the regs for handle_bad_stack */
549	mov	x0, sp
550
551	/* Time to die */
552	bl	handle_bad_stack
553	ASM_BUG()
554SYM_CODE_END(__bad_stack)
555#endif /* CONFIG_VMAP_STACK */
556
557
558	.macro entry_handler el:req, ht:req, regsize:req, label:req
559SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
560	kernel_entry \el, \regsize
561	mov	x0, sp
562	bl	el\el\ht\()_\regsize\()_\label\()_handler
563	.if \el == 0
564	b	ret_to_user
565	.else
566	b	ret_to_kernel
567	.endif
568SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
569	.endm
570
571/*
572 * Early exception handlers
573 */
574	entry_handler	1, t, 64, sync
575	entry_handler	1, t, 64, irq
576	entry_handler	1, t, 64, fiq
577	entry_handler	1, t, 64, error
578
579	entry_handler	1, h, 64, sync
580	entry_handler	1, h, 64, irq
581	entry_handler	1, h, 64, fiq
582	entry_handler	1, h, 64, error
583
584	entry_handler	0, t, 64, sync
585	entry_handler	0, t, 64, irq
586	entry_handler	0, t, 64, fiq
587	entry_handler	0, t, 64, error
588
589	entry_handler	0, t, 32, sync
590	entry_handler	0, t, 32, irq
591	entry_handler	0, t, 32, fiq
592	entry_handler	0, t, 32, error
593
594SYM_CODE_START_LOCAL(ret_to_kernel)
595	kernel_exit 1
596SYM_CODE_END(ret_to_kernel)
597
598SYM_CODE_START_LOCAL(ret_to_user)
599	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
600	enable_step_tsk x19, x2
601#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
602	bl	stackleak_erase
603#endif
604	kernel_exit 0
605SYM_CODE_END(ret_to_user)
606
607	.popsection				// .entry.text
608
609	// Move from tramp_pg_dir to swapper_pg_dir
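	// swapper_pg_dir sits TRAMP_SWAPPER_OFFSET bytes above tramp_pg_dir,
	// and clearing USER_ASID_FLAG (ASID bit 0) selects the kernel half of
	// the current ASID pair.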
610	.macro tramp_map_kernel, tmp
611	mrs	\tmp, ttbr1_el1
612	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
613	bic	\tmp, \tmp, #USER_ASID_FLAG
614	msr	ttbr1_el1, \tmp
615#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
616alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
617	/* ASID already in \tmp[63:48] */
618	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
619	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
620	/* 2MB boundary containing the vectors, so we nobble the walk cache */
621	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
622	isb
623	tlbi	vae1, \tmp
624	dsb	nsh
625alternative_else_nop_endif
626#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
627	.endm
628
629	// Move from swapper_pg_dir to tramp_pg_dir
630	.macro tramp_unmap_kernel, tmp
631	mrs	\tmp, ttbr1_el1
632	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
633	orr	\tmp, \tmp, #USER_ASID_FLAG
634	msr	ttbr1_el1, \tmp
635	/*
636	 * We avoid running the post_ttbr_update_workaround here because
637	 * it's only needed by Cavium ThunderX, which requires KPTI to be
638	 * disabled.
639	 */
640	.endm
641
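	/*
	 * The trampoline data page sits one page below .entry.tramp.text.
	 * With CONFIG_RANDOMIZE_BASE, kernel addresses needed by the
	 * trampoline are loaded indirectly through that page; otherwise a
	 * literal pool load is used (see tramp_data_read_var).
	 */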
642	.macro tramp_data_page	dst
643	adr_l	\dst, .entry.tramp.text
644	sub	\dst, \dst, PAGE_SIZE
645	.endm
646
647	.macro tramp_data_read_var	dst, var
648#ifdef CONFIG_RANDOMIZE_BASE
649	tramp_data_page		\dst
650	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
651	ldr	\dst, [\dst]
652#else
653	ldr	\dst, =\var
654#endif
655	.endm
656
657#define BHB_MITIGATION_NONE	0
658#define BHB_MITIGATION_LOOP	1
659#define BHB_MITIGATION_FW	2
660#define BHB_MITIGATION_INSN	3
661
662	.macro tramp_ventry, vector_start, regsize, kpti, bhb
663	.align	7
6641:
665	.if	\regsize == 64
666	msr	tpidrro_el0, x30	// Restored in kernel_ventry
667	.endif
668
669	.if	\bhb == BHB_MITIGATION_LOOP
670	/*
671	 * This sequence must appear before the first indirect branch. i.e. the
672	 * ret out of tramp_ventry. It appears here because x30 is free.
673	 */
674	__mitigate_spectre_bhb_loop	x30
675	.endif // \bhb == BHB_MITIGATION_LOOP
676
677	.if	\bhb == BHB_MITIGATION_INSN
678	clearbhb
679	isb
680	.endif // \bhb == BHB_MITIGATION_INSN
681
682	.if	\kpti == 1
683	/*
684	 * Defend against branch aliasing attacks by pushing a dummy
685	 * entry onto the return stack and using a RET instruction to
686	 * enter the full-fat kernel vectors.
687	 */
688	bl	2f
689	b	.
6902:
691	tramp_map_kernel	x30
692alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
693	tramp_data_read_var	x30, vectors
694alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
695	prfm	plil1strm, [x30, #(1b - \vector_start)]
696alternative_else_nop_endif
697
698	msr	vbar_el1, x30
699	isb
700	.else
701	ldr	x30, =vectors
702	.endif // \kpti == 1
703
704	.if	\bhb == BHB_MITIGATION_FW
705	/*
706	 * The firmware sequence must appear before the first indirect branch.
707	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
708	 * mapped to save/restore the registers the SMC clobbers.
709	 */
710	__mitigate_spectre_bhb_fw
711	.endif // \bhb == BHB_MITIGATION_FW
712
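	/*
	 * Enter the real vector at +4, past its leading branch, so that the
	 * EL0 entry runs the trampoline cleanup (recovering x30 from
	 * tpidrro_el0) that is otherwise skipped.
	 */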
713	add	x30, x30, #(1b - \vector_start + 4)
714	ret
715.org 1b + 128	// Did we overflow the ventry slot?
716	.endm
717
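	/*
	 * Exit to EL0 via the trampoline: point VBAR_EL1 back at this CPU's
	 * trampoline vectors, restore x30 from pt_regs, switch TTBR1 to
	 * tramp_pg_dir and (for native tasks) recover x29 from FAR_EL1, where
	 * kernel_exit stashed it.
	 */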
718	.macro tramp_exit, regsize = 64
719	tramp_data_read_var	x30, this_cpu_vector
720	get_this_cpu_offset x29
721	ldr	x30, [x30, x29]
722
723	msr	vbar_el1, x30
724	ldr	lr, [sp, #S_LR]
725	tramp_unmap_kernel	x29
726	.if	\regsize == 64
727	mrs	x29, far_el1
728	.endif
729	add	sp, sp, #PT_REGS_SIZE		// restore sp
730	eret
731	sb
732	.endm
733
734	.macro	generate_tramp_vector,	kpti, bhb
735.Lvector_start\@:
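	// Only the lower-EL half of the vector table is populated: skip the
	// 0x400 bytes of current-EL slots so the eight entries below land at
	// the 64-bit EL0 (+0x400) and 32-bit EL0 (+0x600) offsets.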
736	.space	0x400
737
738	.rept	4
739	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
740	.endr
741	.rept	4
742	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
743	.endr
744	.endm
745
746#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
747/*
748 * Exception vectors trampoline.
749 * The order must match __bp_harden_el1_vectors and the
750 * arm64_bp_harden_el1_vectors enum.
751 */
752	.pushsection ".entry.tramp.text", "ax"
753	.align	11
754SYM_CODE_START_NOALIGN(tramp_vectors)
755#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
756	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
757	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
758	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
759#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
760	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
761SYM_CODE_END(tramp_vectors)
762
763SYM_CODE_START(tramp_exit_native)
764	tramp_exit
765SYM_CODE_END(tramp_exit_native)
766
767SYM_CODE_START(tramp_exit_compat)
768	tramp_exit	32
769SYM_CODE_END(tramp_exit_compat)
770
771	.ltorg
772	.popsection				// .entry.tramp.text
773#ifdef CONFIG_RANDOMIZE_BASE
774	.pushsection ".rodata", "a"
775	.align PAGE_SHIFT
776SYM_DATA_START(__entry_tramp_data_start)
777__entry_tramp_data_vectors:
778	.quad	vectors
779#ifdef CONFIG_ARM_SDE_INTERFACE
780__entry_tramp_data___sdei_asm_handler:
781	.quad	__sdei_asm_handler
782#endif /* CONFIG_ARM_SDE_INTERFACE */
783__entry_tramp_data_this_cpu_vector:
784	.quad	this_cpu_vector
785SYM_DATA_END(__entry_tramp_data_start)
786	.popsection				// .rodata
787#endif /* CONFIG_RANDOMIZE_BASE */
788#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
789
790/*
791 * Exception vectors for spectre mitigations on entry from EL1 when
792 * kpti is not in use.
793 */
794	.macro generate_el1_vector, bhb
795.Lvector_start\@:
796	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
797	kernel_ventry	1, t, 64, irq		// IRQ EL1t
798	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
799	kernel_ventry	1, t, 64, error		// Error EL1t
800
801	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
802	kernel_ventry	1, h, 64, irq		// IRQ EL1h
803	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
804	kernel_ventry	1, h, 64, error		// Error EL1h
805
806	.rept	4
807	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
808	.endr
809	.rept 4
810	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
811	.endr
812	.endm
813
814/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
815	.pushsection ".entry.text", "ax"
816	.align	11
817SYM_CODE_START(__bp_harden_el1_vectors)
818#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
819	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
820	generate_el1_vector	bhb=BHB_MITIGATION_FW
821	generate_el1_vector	bhb=BHB_MITIGATION_INSN
822#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
823SYM_CODE_END(__bp_harden_el1_vectors)
824	.popsection
825
826
827/*
828 * Register switch for AArch64. The callee-saved registers need to be saved
829 * and restored. On entry:
830 *   x0 = previous task_struct (must be preserved across the switch)
831 *   x1 = next task_struct
832 * Previous and next are guaranteed not to be the same.
833 *
834 */
835SYM_FUNC_START(cpu_switch_to)
836	mov	x10, #THREAD_CPU_CONTEXT
837	add	x8, x0, x10
838	mov	x9, sp
839	stp	x19, x20, [x8], #16		// store callee-saved registers
840	stp	x21, x22, [x8], #16
841	stp	x23, x24, [x8], #16
842	stp	x25, x26, [x8], #16
843	stp	x27, x28, [x8], #16
844	stp	x29, x9, [x8], #16
845	str	lr, [x8]
846	add	x8, x1, x10
847	ldp	x19, x20, [x8], #16		// restore callee-saved registers
848	ldp	x21, x22, [x8], #16
849	ldp	x23, x24, [x8], #16
850	ldp	x25, x26, [x8], #16
851	ldp	x27, x28, [x8], #16
852	ldp	x29, x9, [x8], #16
853	ldr	lr, [x8]
854	mov	sp, x9
855	msr	sp_el0, x1
856	ptrauth_keys_install_kernel x1, x8, x9, x10
857	scs_save x0
858	scs_load x1
859	ret
860SYM_FUNC_END(cpu_switch_to)
861NOKPROBE(cpu_switch_to)
862
863/*
864 * This is how we return from a fork.
865 */
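/*
 * x19/x20 are the cpu_context slots that copy_thread() uses to pass a
 * kernel thread's entry point and argument; x19 is zero when returning to
 * a user task (see the cbz below).
 */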
866SYM_CODE_START(ret_from_fork)
867	bl	schedule_tail
868	cbz	x19, 1f				// not a kernel thread
869	mov	x0, x20
870	blr	x19
8711:	get_current_task tsk
872	mov	x0, sp
873	bl	asm_exit_to_user_mode
874	b	ret_to_user
875SYM_CODE_END(ret_from_fork)
876NOKPROBE(ret_from_fork)
877
878/*
879 * void call_on_irq_stack(struct pt_regs *regs,
880 * 		          void (*func)(struct pt_regs *));
881 *
882 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
883 */
884SYM_FUNC_START(call_on_irq_stack)
885#ifdef CONFIG_SHADOW_CALL_STACK
886	stp	scs_sp, xzr, [sp, #-16]!
887	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
888#endif
889	/* Create a frame record to save our LR and SP (implicit in FP) */
890	stp	x29, x30, [sp, #-16]!
891	mov	x29, sp
892
893	ldr_this_cpu x16, irq_stack_ptr, x17
894	mov	x15, #IRQ_STACK_SIZE
895	add	x16, x16, x15
896
897	/* Move to the new stack and call the function there */
898	mov	sp, x16
899	blr	x1
900
901	/*
902	 * Restore the SP from the FP, and restore the FP and LR from the frame
903	 * record.
904	 */
905	mov	sp, x29
906	ldp	x29, x30, [sp], #16
907#ifdef CONFIG_SHADOW_CALL_STACK
908	ldp	scs_sp, xzr, [sp], #16
909#endif
910	ret
911SYM_FUNC_END(call_on_irq_stack)
912NOKPROBE(call_on_irq_stack)
913
914#ifdef CONFIG_ARM_SDE_INTERFACE
915
916#include <asm/sdei.h>
917#include <uapi/linux/arm_sdei.h>
918
919.macro sdei_handler_exit exit_mode
920	/* On success, this call never returns... */
921	cmp	\exit_mode, #SDEI_EXIT_SMC
922	b.ne	99f
923	smc	#0
924	b	.
92599:	hvc	#0
926	b	.
927.endm
928
929#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
930/*
931 * The regular SDEI entry point may have been unmapped along with the rest of
932 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
933 * argument accessible.
934 *
935 * This clobbers x4, __sdei_handler() will restore this from firmware's
936 * copy.
937 */
938.ltorg
939.pushsection ".entry.tramp.text", "ax"
940SYM_CODE_START(__sdei_asm_entry_trampoline)
941	mrs	x4, ttbr1_el1
942	tbz	x4, #USER_ASID_BIT, 1f
943
944	tramp_map_kernel tmp=x4
945	isb
946	mov	x4, xzr
947
948	/*
949	 * Remember whether to unmap the kernel on exit.
950	 */
9511:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
952	tramp_data_read_var     x4, __sdei_asm_handler
953	br	x4
954SYM_CODE_END(__sdei_asm_entry_trampoline)
955NOKPROBE(__sdei_asm_entry_trampoline)
956
957/*
958 * Make the exit call and restore the original ttbr1_el1
959 *
960 * x0 & x1: setup for the exit API call
961 * x2: exit_mode
962 * x4: struct sdei_registered_event argument from registration time.
963 */
964SYM_CODE_START(__sdei_asm_exit_trampoline)
965	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
966	cbnz	x4, 1f
967
968	tramp_unmap_kernel	tmp=x4
969
9701:	sdei_handler_exit exit_mode=x2
971SYM_CODE_END(__sdei_asm_exit_trampoline)
972NOKPROBE(__sdei_asm_exit_trampoline)
973	.ltorg
974.popsection		// .entry.tramp.text
975#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
976
977/*
978 * Software Delegated Exception entry point.
979 *
980 * x0: Event number
981 * x1: struct sdei_registered_event argument from registration time.
982 * x2: interrupted PC
983 * x3: interrupted PSTATE
984 * x4: maybe clobbered by the trampoline
985 *
986	 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
987 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
988 * want them.
989 */
990SYM_CODE_START(__sdei_asm_handler)
991	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
992	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
993	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
994	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
995	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
996	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
997	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
998	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
999	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1000	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1001	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1002	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1003	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1004	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1005	mov	x4, sp
1006	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1007
1008	mov	x19, x1
1009
1010#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1011	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1012#endif
1013
1014#ifdef CONFIG_VMAP_STACK
1015	/*
1016	 * entry.S may have been using sp as a scratch register; find whether
1017	 * this is a normal or critical event and switch to the appropriate
1018	 * stack for this CPU.
1019	 */
1020	cbnz	w4, 1f
1021	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1022	b	2f
10231:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
10242:	mov	x6, #SDEI_STACK_SIZE
1025	add	x5, x5, x6
1026	mov	sp, x5
1027#endif
1028
1029#ifdef CONFIG_SHADOW_CALL_STACK
1030	/* Use a separate shadow call stack for normal and critical events */
1031	cbnz	w4, 3f
1032	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1033	b	4f
10343:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
10354:
1036#endif
1037
1038	/*
1039	 * We may have interrupted userspace, or a guest, or exit-from or
1040	 * return-to either of these. We can't trust sp_el0, restore it.
1041	 */
1042	mrs	x28, sp_el0
1043	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1044	msr	sp_el0, x0
1045
1046	/* If we interrupted the kernel, point to the previous stack/frame. */
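	/* PSTATE.M[3:2] holds the interrupted EL; CurrentEL has the same layout. */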
1047	and     x0, x3, #0xc
1048	mrs     x1, CurrentEL
1049	cmp     x0, x1
1050	csel	x29, x29, xzr, eq	// fp, or zero
1051	csel	x4, x2, xzr, eq		// elr, or zero
1052
1053	stp	x29, x4, [sp, #-16]!
1054	mov	x29, sp
1055
1056	add	x0, x19, #SDEI_EVENT_INTREGS
1057	mov	x1, x19
1058	bl	__sdei_handler
1059
1060	msr	sp_el0, x28
1061	/* restore regs >x17 that we clobbered */
1062	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1063	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1064	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1065	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1066	mov	sp, x1
1067
1068	mov	x1, x0			// address to complete_and_resume
1069	/* x0 = (x0 <= SDEI_EV_FAILED) ?
1070	 * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME
1071	 */
1072	cmp	x0, #SDEI_EV_FAILED
1073	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1074	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1075	csel	x0, x2, x3, ls
1076
1077	ldr_l	x2, sdei_exit_mode
1078
1079alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1080	sdei_handler_exit exit_mode=x2
1081alternative_else_nop_endif
1082
1083#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1084	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
1085	br	x5
1086#endif
1087SYM_CODE_END(__sdei_asm_handler)
1088NOKPROBE(__sdei_asm_handler)
1089#endif /* CONFIG_ARM_SDE_INTERFACE */
1090