xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision de8c12110a130337c8e7e7b8250de0580e644dee)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32/*
33 * Context tracking and irqflag tracing need to instrument transitions between
34 * user and kernel mode.
35 */
36	.macro user_exit_irqoff
37#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
38	bl	enter_from_user_mode
39#endif
40	.endm
41
42	.macro user_enter_irqoff
43#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
44	bl	exit_to_user_mode
45#endif
46	.endm
47
48	.macro	clear_gp_regs
49	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
50	mov	x\n, xzr
51	.endr
52	.endm
53
54/*
55 * Bad Abort numbers
56 *-----------------
57 */
58#define BAD_SYNC	0
59#define BAD_IRQ		1
60#define BAD_FIQ		2
61#define BAD_ERROR	3
62
63	.macro kernel_ventry, el, label, regsize = 64
64	.align 7
65#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
66	.if	\el == 0
67alternative_if ARM64_UNMAP_KERNEL_AT_EL0
68	.if	\regsize == 64
69	mrs	x30, tpidrro_el0
70	msr	tpidrro_el0, xzr
71	.else
72	mov	x30, xzr
73	.endif
74alternative_else_nop_endif
75	.endif
76#endif
77
78	sub	sp, sp, #PT_REGS_SIZE
79#ifdef CONFIG_VMAP_STACK
80	/*
81	 * Test whether the SP has overflowed, without corrupting a GPR.
82	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
83	 * should always be zero.
84	 */
85	add	sp, sp, x0			// sp' = sp + x0
86	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
87	tbnz	x0, #THREAD_SHIFT, 0f
88	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
89	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
90	b	el\()\el\()_\label
91
920:
93	/*
94	 * Either we've just detected an overflow, or we've taken an exception
95	 * while on the overflow stack. Either way, we won't return to
96	 * userspace, and can clobber EL0 registers to free up GPRs.
97	 */
98
99	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
100	msr	tpidr_el0, x0
101
102	/* Recover the original x0 value and stash it in tpidrro_el0 */
103	sub	x0, sp, x0
104	msr	tpidrro_el0, x0
105
106	/* Switch to the overflow stack */
107	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
108
109	/*
110	 * Check whether we were already on the overflow stack. This may happen
111	 * after panic() re-enables interrupts.
112	 */
113	mrs	x0, tpidr_el0			// sp of interrupted context
114	sub	x0, sp, x0			// delta with top of overflow stack
115	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
116	b.ne	__bad_stack			// no? -> bad stack pointer
117
118	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
119	sub	sp, sp, x0
120	mrs	x0, tpidrro_el0
121#endif
122	b	el\()\el\()_\label
123	.endm
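A minimal userspace C sketch of the GPR-free overflow test in kernel_ventry above: the add/sub pair recovers sp and x0 without any extra scratch register, and the THREAD_SHIFT bit test detects an sp that has run off the stack. THREAD_SHIFT's value, the function name and the example addresses are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT 14	/* assumed stack-size shift (16KiB stacks), illustration only */

/*
 * Mirror of the add/sub/tbnz sequence: decide whether "sp" has left a
 * THREAD_SHIFT-aligned stack without using any scratch register beyond
 * the incoming x0, then recover both original values.
 */
static int sp_overflowed(uint64_t sp, uint64_t x0)
{
	uint64_t sp2, x0_2;

	sp2 = sp + x0;				/* sp' = sp + x0 */
	x0_2 = sp2 - x0;			/* x0' = sp' - x0 = original sp */
	if (x0_2 & (1ULL << THREAD_SHIFT))
		return 1;			/* bit set: sp is off the stack */
	x0_2 = sp2 - x0_2;			/* x0'' = sp' - sp = original x0 */
	sp2 = sp2 - x0_2;			/* sp'' = sp' - x0 = original sp */
	return !(sp2 == sp && x0_2 == x0);	/* 0: in range, both values recovered */
}

int main(void)
{
	/* A stack spanning [0x10000000, 0x10004000) keeps bit 14 of sp clear... */
	printf("%d\n", sp_overflowed(0x10000100, 0x1234));	/* -> 0 */
	/* ...while an sp that has run below the base sets it. */
	printf("%d\n", sp_overflowed(0x0fffff00, 0x1234));	/* -> 1 */
	return 0;
}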
124
125	.macro tramp_alias, dst, sym
126	mov_q	\dst, TRAMP_VALIAS
127	add	\dst, \dst, #(\sym - .entry.tramp.text)
128	.endm
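A small C sketch of the address arithmetic tramp_alias performs: the symbol keeps its offset within .entry.tramp.text and only the base is rebased onto the fixmap alias. All three addresses are caller-supplied assumptions here, purely for illustration.

#include <stdint.h>

static inline uintptr_t tramp_alias_addr(uintptr_t tramp_valias,
					 uintptr_t tramp_text_start,
					 uintptr_t sym)
{
	/* alias = TRAMP_VALIAS + (sym - start of .entry.tramp.text) */
	return tramp_valias + (sym - tramp_text_start);
}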
129
130	/*
131	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
132	 * them if required.
133	 */
134	.macro	apply_ssbd, state, tmp1, tmp2
135alternative_cb	spectre_v4_patch_fw_mitigation_enable
136	b	.L__asm_ssbd_skip\@		// Patched to NOP
137alternative_cb_end
138	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
139	cbz	\tmp2,	.L__asm_ssbd_skip\@
140	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
141	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
142	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
143	mov	w1, #\state
144alternative_cb	spectre_v4_patch_fw_mitigation_conduit
145	nop					// Patched to SMC/HVC #0
146alternative_cb_end
147.L__asm_ssbd_skip\@:
148	.endm
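A hedged C sketch of the decision logic in apply_ssbd: skip when the alternative left the skip branch in place (firmware mitigation not enabled), when this CPU does not require the SSBD callback, or when the current task has TIF_SSBD set; otherwise issue the ARCH_WORKAROUND_2 firmware call with the requested state. The helper names are illustrative stand-ins, not kernel functions.

#include <stdbool.h>
#include <stdint.h>

/* SMCCC function ID for ARCH_WORKAROUND_2; treat the value as an assumption here. */
#define ARM_SMCCC_ARCH_WORKAROUND_2	0x80007fffU

/* Illustrative stand-ins for the per-CPU flag, the thread flag and the conduit. */
extern bool this_cpu_ssbd_callback_required(void);
extern bool current_has_tif_ssbd(void);
extern void firmware_call(uint32_t fn, uint32_t arg);	/* SMC or HVC #0, patched at boot */

static void apply_ssbd_sketch(uint32_t state, bool fw_mitigation_enabled)
{
	if (!fw_mitigation_enabled)		/* skip branch not patched to NOP */
		return;
	if (!this_cpu_ssbd_callback_required())	/* this CPU needs no callback */
		return;
	if (current_has_tif_ssbd())		/* tbnz ..., #TIF_SSBD, skip */
		return;
	firmware_call(ARM_SMCCC_ARCH_WORKAROUND_2, state);	/* w0 = fn, w1 = \state */
}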
149
150	/* Check for MTE asynchronous tag check faults */
151	.macro check_mte_async_tcf, tmp, ti_flags
152#ifdef CONFIG_ARM64_MTE
153	.arch_extension lse
154alternative_if_not ARM64_MTE
155	b	1f
156alternative_else_nop_endif
157	mrs_s	\tmp, SYS_TFSRE0_EL1
158	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
159	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
160	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
161	add	\ti_flags, tsk, #TSK_TI_FLAGS
162	stset	\tmp, [\ti_flags]
163	msr_s	SYS_TFSRE0_EL1, xzr
1641:
165#endif
166	.endm
167
168	/* Clear the MTE asynchronous tag check faults */
169	.macro clear_mte_async_tcf
170#ifdef CONFIG_ARM64_MTE
171alternative_if ARM64_MTE
172	dsb	ish
173	msr_s	SYS_TFSRE0_EL1, xzr
174alternative_else_nop_endif
175#endif
176	.endm
177
178	.macro mte_set_gcr, tmp, tmp2
179#ifdef CONFIG_ARM64_MTE
180	/*
181	 * Calculate and set the exclude mask preserving
182	 * the RRND (bit[16]) setting.
183	 */
184	mrs_s	\tmp2, SYS_GCR_EL1
185	bfi	\tmp2, \tmp, #0, #16
186	msr_s	SYS_GCR_EL1, \tmp2
187#endif
188	.endm
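The bfi above inserts the 16-bit exclude mask into GCR_EL1 while leaving RRND (bit 16) and everything above untouched. A small C sketch of that read-modify-write; the register accessors are illustrative stubs for the mrs_s/msr_s pair.

#include <stdint.h>

extern uint64_t read_gcr_el1(void);
extern void write_gcr_el1(uint64_t val);

static void mte_set_gcr_sketch(uint64_t exclude_mask)
{
	uint64_t gcr = read_gcr_el1();

	/* bfi \tmp2, \tmp, #0, #16: replace bits [15:0], keep RRND (bit 16) and above. */
	gcr = (gcr & ~0xffffULL) | (exclude_mask & 0xffffULL);
	write_gcr_el1(gcr);
}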
189
190	.macro mte_set_kernel_gcr, tmp, tmp2
191#ifdef CONFIG_KASAN_HW_TAGS
192alternative_if_not ARM64_MTE
193	b	1f
194alternative_else_nop_endif
195	ldr_l	\tmp, gcr_kernel_excl
196
197	mte_set_gcr \tmp, \tmp2
198	isb
1991:
200#endif
201	.endm
202
203	.macro mte_set_user_gcr, tsk, tmp, tmp2
204#ifdef CONFIG_ARM64_MTE
205alternative_if_not ARM64_MTE
206	b	1f
207alternative_else_nop_endif
208	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
209
210	mte_set_gcr \tmp, \tmp2
2111:
212#endif
213	.endm
214
215	.macro	kernel_entry, el, regsize = 64
216	.if	\regsize == 32
217	mov	w0, w0				// zero upper 32 bits of x0
218	.endif
219	stp	x0, x1, [sp, #16 * 0]
220	stp	x2, x3, [sp, #16 * 1]
221	stp	x4, x5, [sp, #16 * 2]
222	stp	x6, x7, [sp, #16 * 3]
223	stp	x8, x9, [sp, #16 * 4]
224	stp	x10, x11, [sp, #16 * 5]
225	stp	x12, x13, [sp, #16 * 6]
226	stp	x14, x15, [sp, #16 * 7]
227	stp	x16, x17, [sp, #16 * 8]
228	stp	x18, x19, [sp, #16 * 9]
229	stp	x20, x21, [sp, #16 * 10]
230	stp	x22, x23, [sp, #16 * 11]
231	stp	x24, x25, [sp, #16 * 12]
232	stp	x26, x27, [sp, #16 * 13]
233	stp	x28, x29, [sp, #16 * 14]
234
235	.if	\el == 0
236	clear_gp_regs
237	mrs	x21, sp_el0
238	ldr_this_cpu	tsk, __entry_task, x20
239	msr	sp_el0, tsk
240
241	/*
242	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
243	 * when scheduling.
244	 */
245	ldr	x19, [tsk, #TSK_TI_FLAGS]
246	disable_step_tsk x19, x20
247
248	/* Check for asynchronous tag check faults in user space */
249	check_mte_async_tcf x22, x23
250	apply_ssbd 1, x22, x23
251
252	ptrauth_keys_install_kernel tsk, x20, x22, x23
253
254	mte_set_kernel_gcr x22, x23
255
256	scs_load tsk, x20
257	.else
258	add	x21, sp, #PT_REGS_SIZE
259	get_current_task tsk
260	.endif /* \el == 0 */
261	mrs	x22, elr_el1
262	mrs	x23, spsr_el1
263	stp	lr, x21, [sp, #S_LR]
264
265	/*
266	 * For exceptions from EL0, terminate the callchain here.
267	 * For exceptions from EL1, create a synthetic frame record so the
268	 * interrupted code shows up in the backtrace.
269	 */
270	.if \el == 0
271	mov	x29, xzr
272	.else
273	stp	x29, x22, [sp, #S_STACKFRAME]
274	add	x29, sp, #S_STACKFRAME
275	.endif
276
277#ifdef CONFIG_ARM64_SW_TTBR0_PAN
278alternative_if_not ARM64_HAS_PAN
279	bl	__swpan_entry_el\el
280alternative_else_nop_endif
281#endif
282
283	stp	x22, x23, [sp, #S_PC]
284
285	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
286	.if	\el == 0
287	mov	w21, #NO_SYSCALL
288	str	w21, [sp, #S_SYSCALLNO]
289	.endif
290
291	/* Save pmr */
292alternative_if ARM64_HAS_IRQ_PRIO_MASKING
293	mrs_s	x20, SYS_ICC_PMR_EL1
294	str	x20, [sp, #S_PMR_SAVE]
295alternative_else_nop_endif
296
297	/* Re-enable tag checking (TCO set on exception entry) */
298#ifdef CONFIG_ARM64_MTE
299alternative_if ARM64_MTE
300	SET_PSTATE_TCO(0)
301alternative_else_nop_endif
302#endif
303
304	/*
305	 * Registers that may be useful after this macro is invoked:
306	 *
307	 * x20 - ICC_PMR_EL1
308	 * x21 - aborted SP
309	 * x22 - aborted PC
310	 * x23 - aborted PSTATE
311	 */
312	.endm
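kernel_entry stores the GPRs in 16-byte pairs from the base of the frame, then lr and the aborted SP at S_LR, and ELR/SPSR at S_PC. A simplified C view of the frame those offsets assume; the field order follows the stores (and struct user_pt_regs), but this struct is an illustration, not the kernel's asm-offsets.

#include <stdint.h>

struct entry_frame_sketch {
	uint64_t regs[31];	/* x0..x30: stp pairs, then lr via "stp lr, x21" */
	uint64_t sp;		/* aborted SP (x21): sp_el0 or sp before the frame */
	uint64_t pc;		/* aborted PC (x22 <- ELR_EL1), offset S_PC */
	uint64_t pstate;	/* aborted PSTATE (x23 <- SPSR_EL1) */
	/* syscallno, pmr_save, stackframe, ... not shown */
};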
313
314	.macro	kernel_exit, el
315	.if	\el != 0
316	disable_daif
317	.endif
318
319	/* Restore pmr */
320alternative_if ARM64_HAS_IRQ_PRIO_MASKING
321	ldr	x20, [sp, #S_PMR_SAVE]
322	msr_s	SYS_ICC_PMR_EL1, x20
323	mrs_s	x21, SYS_ICC_CTLR_EL1
324	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
325	dsb	sy				// Ensure priority change is seen by redistributor
326.L__skip_pmr_sync\@:
327alternative_else_nop_endif
328
329	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
330
331#ifdef CONFIG_ARM64_SW_TTBR0_PAN
332alternative_if_not ARM64_HAS_PAN
333	bl	__swpan_exit_el\el
334alternative_else_nop_endif
335#endif
336
337	.if	\el == 0
338	ldr	x23, [sp, #S_SP]		// load return stack pointer
339	msr	sp_el0, x23
340	tst	x22, #PSR_MODE32_BIT		// native task?
341	b.eq	3f
342
343#ifdef CONFIG_ARM64_ERRATUM_845719
344alternative_if ARM64_WORKAROUND_845719
345#ifdef CONFIG_PID_IN_CONTEXTIDR
346	mrs	x29, contextidr_el1
347	msr	contextidr_el1, x29
348#else
349	msr contextidr_el1, xzr
350#endif
351alternative_else_nop_endif
352#endif
3533:
354	scs_save tsk, x0
355
356	/* No kernel C function calls after this as user keys are set. */
357	ptrauth_keys_install_user tsk, x0, x1, x2
358
359	mte_set_user_gcr tsk, x0, x1
360
361	apply_ssbd 0, x0, x1
362	.endif
363
364	msr	elr_el1, x21			// set up the return data
365	msr	spsr_el1, x22
366	ldp	x0, x1, [sp, #16 * 0]
367	ldp	x2, x3, [sp, #16 * 1]
368	ldp	x4, x5, [sp, #16 * 2]
369	ldp	x6, x7, [sp, #16 * 3]
370	ldp	x8, x9, [sp, #16 * 4]
371	ldp	x10, x11, [sp, #16 * 5]
372	ldp	x12, x13, [sp, #16 * 6]
373	ldp	x14, x15, [sp, #16 * 7]
374	ldp	x16, x17, [sp, #16 * 8]
375	ldp	x18, x19, [sp, #16 * 9]
376	ldp	x20, x21, [sp, #16 * 10]
377	ldp	x22, x23, [sp, #16 * 11]
378	ldp	x24, x25, [sp, #16 * 12]
379	ldp	x26, x27, [sp, #16 * 13]
380	ldp	x28, x29, [sp, #16 * 14]
381	ldr	lr, [sp, #S_LR]
382	add	sp, sp, #PT_REGS_SIZE		// restore sp
383
384	.if	\el == 0
385alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
386#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
387	bne	4f
388	msr	far_el1, x30
389	tramp_alias	x30, tramp_exit_native
390	br	x30
3914:
392	tramp_alias	x30, tramp_exit_compat
393	br	x30
394#endif
395	.else
396	/* Ensure any device/NC reads complete */
397	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
398
399	eret
400	.endif
401	sb
402	.endm
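When priority masking is in use, kernel_exit only needs the dsb after restoring PMR if the GIC actually uses PMR hints for distribution, i.e. ICC_CTLR_EL1.PMHE (bit 6) is set. A hedged C sketch of that check; the accessors and barrier helper are illustrative stubs.

#include <stdint.h>

#define ICC_CTLR_EL1_PMHE_BIT	6	/* mirrors the "tbz x21, #6" test above */

extern uint64_t read_icc_ctlr_el1(void);
extern void write_icc_pmr_el1(uint64_t val);
extern void dsb_sy(void);

static void restore_pmr_sketch(uint64_t saved_pmr)
{
	write_icc_pmr_el1(saved_pmr);
	/* Only pay for the dsb if the redistributor consumes PMR (PMHE set). */
	if (read_icc_ctlr_el1() & (1ULL << ICC_CTLR_EL1_PMHE_BIT))
		dsb_sy();
}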
403
404#ifdef CONFIG_ARM64_SW_TTBR0_PAN
405	/*
406	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
407	 * EL0, there is no need to check the state of TTBR0_EL1 since
408	 * accesses are always enabled.
409	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
410	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
411	 * user mappings.
412	 */
413SYM_CODE_START_LOCAL(__swpan_entry_el1)
414	mrs	x21, ttbr0_el1
415	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
416	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
417	b.eq	1f				// TTBR0 access already disabled
418	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
419SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
420	__uaccess_ttbr0_disable x21
4211:	ret
422SYM_CODE_END(__swpan_entry_el1)
423
424	/*
425	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
426	 * PAN bit checking.
427	 */
428SYM_CODE_START_LOCAL(__swpan_exit_el1)
429	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
430	__uaccess_ttbr0_enable x0, x1
4311:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
432	ret
433SYM_CODE_END(__swpan_exit_el1)
434
435SYM_CODE_START_LOCAL(__swpan_exit_el0)
436	__uaccess_ttbr0_enable x0, x1
437	/*
438	 * Enable errata workarounds only if returning to user. The only
439	 * workaround currently required for TTBR0_EL1 changes is for the
440	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
441	 * corruption).
442	 */
443	b	post_ttbr_update_workaround
444SYM_CODE_END(__swpan_exit_el0)
445#endif
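A hedged C sketch of the __swpan_entry_el1 decision above: the reserved ASID in TTBR0_EL1 means uaccess is already disabled, so the emulated PAN bit is reported as set in the saved SPSR; otherwise the bit is cleared and TTBR0 access is disabled now. The helpers are illustrative stand-ins for the asm.

#include <stdbool.h>
#include <stdint.h>

#define PSR_PAN_BIT	(1ULL << 22)	/* PSTATE.PAN, as tested by "tbnz x22, #22" */

extern uint64_t read_ttbr0_el1(void);
extern bool asid_is_reserved(uint64_t ttbr0);	/* (ttbr0 & TTBR_ASID_MASK) == 0 */
extern void uaccess_ttbr0_disable(void);

/* Returns the SPSR value (x23) that will be saved for an exception from EL1. */
static uint64_t swpan_entry_el1_sketch(uint64_t spsr)
{
	if (asid_is_reserved(read_ttbr0_el1()))
		return spsr | PSR_PAN_BIT;	/* TTBR0 access already disabled */

	uaccess_ttbr0_disable();		/* disable it now, ... */
	return spsr & ~PSR_PAN_BIT;		/* ...and report emulated PAN as clear */
}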
446
447	.macro	irq_stack_entry
448	mov	x19, sp			// preserve the original sp
449#ifdef CONFIG_SHADOW_CALL_STACK
450	mov	x24, scs_sp		// preserve the original shadow stack
451#endif
452
453	/*
454	 * Compare sp with the base of the task stack.
455	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
456	 * and should switch to the irq stack.
457	 */
458	ldr	x25, [tsk, TSK_STACK]
459	eor	x25, x25, x19
460	and	x25, x25, #~(THREAD_SIZE - 1)
461	cbnz	x25, 9998f
462
463	ldr_this_cpu x25, irq_stack_ptr, x26
464	mov	x26, #IRQ_STACK_SIZE
465	add	x26, x25, x26
466
467	/* switch to the irq stack */
468	mov	sp, x26
469
470#ifdef CONFIG_SHADOW_CALL_STACK
471	/* also switch to the irq shadow stack */
472	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
473#endif
474
4759998:
476	.endm
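A minimal C sketch of the "are we on the task stack?" test used above: xor-ing the two pointers and masking off the low THREAD_SIZE bits leaves zero exactly when both lie in the same THREAD_SIZE-aligned block. The THREAD_SIZE value and function name are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE	0x4000UL	/* assumed 16KiB, THREAD_SIZE-aligned task stacks */

/* Mirrors: eor x25, task_stack, sp; and x25, x25, #~(THREAD_SIZE - 1); cbnz ... */
static bool on_task_stack(uintptr_t task_stack, uintptr_t sp)
{
	return ((task_stack ^ sp) & ~(THREAD_SIZE - 1)) == 0;
}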
477
478	/*
479	 * The callee-saved regs (x19-x29) should be preserved between
480	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
481	 * uses x20-x23 to store data for later use.
482	 */
483	.macro	irq_stack_exit
484	mov	sp, x19
485#ifdef CONFIG_SHADOW_CALL_STACK
486	mov	scs_sp, x24
487#endif
488	.endm
489
490/* GPRs used by entry code */
491tsk	.req	x28		// current thread_info
492
493/*
494 * Interrupt handling.
495 */
496	.macro	irq_handler
497	ldr_l	x1, handle_arch_irq
498	mov	x0, sp
499	irq_stack_entry
500	blr	x1
501	irq_stack_exit
502	.endm
503
504#ifdef CONFIG_ARM64_PSEUDO_NMI
505	/*
506	 * Set res to 0 if irqs were unmasked in the interrupted context.
507	 * Otherwise set res to a non-zero value.
508	 */
509	.macro	test_irqs_unmasked res:req, pmr:req
510alternative_if ARM64_HAS_IRQ_PRIO_MASKING
511	sub	\res, \pmr, #GIC_PRIO_IRQON
512alternative_else
513	mov	\res, xzr
514alternative_endif
515	.endm
516#endif
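test_irqs_unmasked yields 0 exactly when the saved PMR equals GIC_PRIO_IRQON, and a non-zero delta otherwise; without priority masking the alternative makes it always report "unmasked". A small C sketch of that contract; the GIC_PRIO_IRQON value is an assumption for illustration.

#include <stdbool.h>
#include <stdint.h>

#define GIC_PRIO_IRQON	0xe0	/* "fully unmasked" priority; value assumed here */

/* 0 means irqs were unmasked in the interrupted context, non-zero otherwise. */
static uint64_t test_irqs_unmasked_sketch(uint64_t saved_pmr, bool have_prio_masking)
{
	if (!have_prio_masking)
		return 0;			/* alternative: mov \res, xzr */
	return saved_pmr - GIC_PRIO_IRQON;	/* sub \res, \pmr, #GIC_PRIO_IRQON */
}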
517
518	.macro	gic_prio_kentry_setup, tmp:req
519#ifdef CONFIG_ARM64_PSEUDO_NMI
520	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
521	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
522	msr_s	SYS_ICC_PMR_EL1, \tmp
523	alternative_else_nop_endif
524#endif
525	.endm
526
527	.macro	gic_prio_irq_setup, pmr:req, tmp:req
528#ifdef CONFIG_ARM64_PSEUDO_NMI
529	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
530	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
531	msr_s	SYS_ICC_PMR_EL1, \tmp
532	alternative_else_nop_endif
533#endif
534	.endm
535
536	.text
537
538/*
539 * Exception vectors.
540 */
541	.pushsection ".entry.text", "ax"
542
543	.align	11
544SYM_CODE_START(vectors)
545	kernel_ventry	1, sync_invalid			// Synchronous EL1t
546	kernel_ventry	1, irq_invalid			// IRQ EL1t
547	kernel_ventry	1, fiq_invalid			// FIQ EL1t
548	kernel_ventry	1, error_invalid		// Error EL1t
549
550	kernel_ventry	1, sync				// Synchronous EL1h
551	kernel_ventry	1, irq				// IRQ EL1h
552	kernel_ventry	1, fiq_invalid			// FIQ EL1h
553	kernel_ventry	1, error			// Error EL1h
554
555	kernel_ventry	0, sync				// Synchronous 64-bit EL0
556	kernel_ventry	0, irq				// IRQ 64-bit EL0
557	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
558	kernel_ventry	0, error			// Error 64-bit EL0
559
560#ifdef CONFIG_COMPAT
561	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
562	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
563	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
564	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
565#else
566	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
567	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
568	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
569	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
570#endif
571SYM_CODE_END(vectors)
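The table above follows the fixed ARMv8 vector layout: four groups (current EL with SP_EL0, current EL with SP_ELx, lower EL AArch64, lower EL AArch32) of four 128-byte slots (sync, irq, fiq, serror), which is why each kernel_ventry is .align 7 and the table .align 11. A short C sketch of the offset arithmetic, with the enum names as illustrative labels.

#include <stdint.h>

enum vector_group { EL1T, EL1H, EL0_64, EL0_32 };	/* table order, 0x200 bytes apart */
enum vector_kind  { V_SYNC, V_IRQ, V_FIQ, V_SERROR };	/* slot order within a group */

/* Each slot is 2^7 = 128 bytes (.align 7); 16 slots give the 2KiB table (.align 11). */
static uint64_t vector_offset(enum vector_group group, enum vector_kind kind)
{
	return ((uint64_t)group * 4 + (uint64_t)kind) * 0x80;
}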
572
573#ifdef CONFIG_VMAP_STACK
574	/*
575	 * We detected an overflow in kernel_ventry, which switched to the
576	 * overflow stack. Stash the exception regs, and head to our overflow
577	 * handler.
578	 */
579__bad_stack:
580	/* Restore the original x0 value */
581	mrs	x0, tpidrro_el0
582
583	/*
584	 * Store the original GPRs to the new stack. The original SP (minus
585	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
586	 */
587	sub	sp, sp, #PT_REGS_SIZE
588	kernel_entry 1
589	mrs	x0, tpidr_el0
590	add	x0, x0, #PT_REGS_SIZE
591	str	x0, [sp, #S_SP]
592
593	/* Stash the regs for handle_bad_stack */
594	mov	x0, sp
595
596	/* Time to die */
597	bl	handle_bad_stack
598	ASM_BUG()
599#endif /* CONFIG_VMAP_STACK */
600
601/*
602 * Invalid mode handlers
603 */
604	.macro	inv_entry, el, reason, regsize = 64
605	kernel_entry \el, \regsize
606	mov	x0, sp
607	mov	x1, #\reason
608	mrs	x2, esr_el1
609	bl	bad_mode
610	ASM_BUG()
611	.endm
612
613SYM_CODE_START_LOCAL(el0_sync_invalid)
614	inv_entry 0, BAD_SYNC
615SYM_CODE_END(el0_sync_invalid)
616
617SYM_CODE_START_LOCAL(el0_irq_invalid)
618	inv_entry 0, BAD_IRQ
619SYM_CODE_END(el0_irq_invalid)
620
621SYM_CODE_START_LOCAL(el0_fiq_invalid)
622	inv_entry 0, BAD_FIQ
623SYM_CODE_END(el0_fiq_invalid)
624
625SYM_CODE_START_LOCAL(el0_error_invalid)
626	inv_entry 0, BAD_ERROR
627SYM_CODE_END(el0_error_invalid)
628
629#ifdef CONFIG_COMPAT
630SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
631	inv_entry 0, BAD_FIQ, 32
632SYM_CODE_END(el0_fiq_invalid_compat)
633#endif
634
635SYM_CODE_START_LOCAL(el1_sync_invalid)
636	inv_entry 1, BAD_SYNC
637SYM_CODE_END(el1_sync_invalid)
638
639SYM_CODE_START_LOCAL(el1_irq_invalid)
640	inv_entry 1, BAD_IRQ
641SYM_CODE_END(el1_irq_invalid)
642
643SYM_CODE_START_LOCAL(el1_fiq_invalid)
644	inv_entry 1, BAD_FIQ
645SYM_CODE_END(el1_fiq_invalid)
646
647SYM_CODE_START_LOCAL(el1_error_invalid)
648	inv_entry 1, BAD_ERROR
649SYM_CODE_END(el1_error_invalid)
650
651/*
652 * EL1 mode handlers.
653 */
654	.align	6
655SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
656	kernel_entry 1
657	mov	x0, sp
658	bl	el1_sync_handler
659	kernel_exit 1
660SYM_CODE_END(el1_sync)
661
662	.align	6
663SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
664	kernel_entry 1
665	gic_prio_irq_setup pmr=x20, tmp=x1
666	enable_da_f
667
668	mov	x0, sp
669	bl	enter_el1_irq_or_nmi
670
671	irq_handler
672
673#ifdef CONFIG_PREEMPTION
674	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
675alternative_if ARM64_HAS_IRQ_PRIO_MASKING
676	/*
677	 * DA_F were cleared at the start of handling. If anything is set in DAIF,
678	 * we came back from an NMI, so skip preemption.
679	 */
680	mrs	x0, daif
681	orr	x24, x24, x0
682alternative_else_nop_endif
683	cbnz	x24, 1f				// preempt count != 0 || NMI return path
684	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
6851:
686#endif
687
688	mov	x0, sp
689	bl	exit_el1_irq_or_nmi
690
691	kernel_exit 1
692SYM_CODE_END(el1_irq)
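With pseudo-NMIs, el1_irq folds the DAIF bits into the preempt count so that an NMI return never preempts. A hedged C sketch of that decision; read_daif and the function name are illustrative, arm64_preempt_schedule_irq is the C helper the asm calls.

#include <stdbool.h>
#include <stdint.h>

extern uint64_t read_daif(void);
extern void arm64_preempt_schedule_irq(void);

static void el1_irq_maybe_preempt(uint64_t preempt_count, bool have_prio_masking)
{
	uint64_t gate = preempt_count;

	/*
	 * DA_F were unmasked at the start of normal IRQ handling, so any bit
	 * still set in DAIF means this was an NMI: force the gate non-zero.
	 */
	if (have_prio_masking)
		gate |= read_daif();

	if (gate == 0)
		arm64_preempt_schedule_irq();	/* irq enable/disable handled inside */
}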
693
694/*
695 * EL0 mode handlers.
696 */
697	.align	6
698SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
699	kernel_entry 0
700	mov	x0, sp
701	bl	el0_sync_handler
702	b	ret_to_user
703SYM_CODE_END(el0_sync)
704
705#ifdef CONFIG_COMPAT
706	.align	6
707SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
708	kernel_entry 0, 32
709	mov	x0, sp
710	bl	el0_sync_compat_handler
711	b	ret_to_user
712SYM_CODE_END(el0_sync_compat)
713
714	.align	6
715SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
716	kernel_entry 0, 32
717	b	el0_irq_naked
718SYM_CODE_END(el0_irq_compat)
719
720SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
721	kernel_entry 0, 32
722	b	el0_error_naked
723SYM_CODE_END(el0_error_compat)
724#endif
725
726	.align	6
727SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
728	kernel_entry 0
729el0_irq_naked:
730	gic_prio_irq_setup pmr=x20, tmp=x0
731	user_exit_irqoff
732	enable_da_f
733
734	tbz	x22, #55, 1f
735	bl	do_el0_irq_bp_hardening
7361:
737	irq_handler
738
739	b	ret_to_user
740SYM_CODE_END(el0_irq)
741
742SYM_CODE_START_LOCAL(el1_error)
743	kernel_entry 1
744	mrs	x1, esr_el1
745	gic_prio_kentry_setup tmp=x2
746	enable_dbg
747	mov	x0, sp
748	bl	do_serror
749	kernel_exit 1
750SYM_CODE_END(el1_error)
751
752SYM_CODE_START_LOCAL(el0_error)
753	kernel_entry 0
754el0_error_naked:
755	mrs	x25, esr_el1
756	gic_prio_kentry_setup tmp=x2
757	user_exit_irqoff
758	enable_dbg
759	mov	x0, sp
760	mov	x1, x25
761	bl	do_serror
762	enable_da_f
763	b	ret_to_user
764SYM_CODE_END(el0_error)
765
766/*
767 * "slow" syscall return path.
768 */
769SYM_CODE_START_LOCAL(ret_to_user)
770	disable_daif
771	gic_prio_kentry_setup tmp=x3
772#ifdef CONFIG_TRACE_IRQFLAGS
773	bl	trace_hardirqs_off
774#endif
775	ldr	x19, [tsk, #TSK_TI_FLAGS]
776	and	x2, x19, #_TIF_WORK_MASK
777	cbnz	x2, work_pending
778finish_ret_to_user:
779	user_enter_irqoff
780	/* Ignore asynchronous tag check faults in the uaccess routines */
781	clear_mte_async_tcf
782	enable_step_tsk x19, x2
783#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
784	bl	stackleak_erase
785#endif
786	kernel_exit 0
787
788/*
789 * Ok, we need to do extra processing; enter the slow path.
790 */
791work_pending:
792	mov	x0, sp				// 'regs'
793	mov	x1, x19
794	bl	do_notify_resume
795	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
796	b	finish_ret_to_user
797SYM_CODE_END(ret_to_user)
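ret_to_user checks the thread flags against _TIF_WORK_MASK and, if anything is pending, hands off to do_notify_resume before re-reading the flags for the single-step check in finish_ret_to_user. A hedged C sketch of that slow path; the accessors and the mask constant are illustrative stand-ins, do_notify_resume is the C helper the asm branches to.

#include <stdint.h>

extern uint64_t read_thread_flags(void);
extern void do_notify_resume(void *regs, uint64_t thread_flags);
extern const uint64_t TIF_WORK_MASK;		/* _TIF_WORK_MASK, value left symbolic */

static uint64_t ret_to_user_slowpath(void *regs)
{
	uint64_t flags = read_thread_flags();

	if (flags & TIF_WORK_MASK) {
		do_notify_resume(regs, flags);	/* handles the pending work */
		flags = read_thread_flags();	/* re-read for the single-step check */
	}
	return flags;	/* finish_ret_to_user feeds these to enable_step_tsk */
}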
798
799	.popsection				// .entry.text
800
801#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
802/*
803 * Exception vectors trampoline.
804 */
805	.pushsection ".entry.tramp.text", "ax"
806
807	// Move from tramp_pg_dir to swapper_pg_dir
808	.macro tramp_map_kernel, tmp
809	mrs	\tmp, ttbr1_el1
810	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
811	bic	\tmp, \tmp, #USER_ASID_FLAG
812	msr	ttbr1_el1, \tmp
813#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
814alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
815	/* ASID already in \tmp[63:48] */
816	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
817	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
818	/* 2MB boundary containing the vectors, so we nobble the walk cache */
819	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
820	isb
821	tlbi	vae1, \tmp
822	dsb	nsh
823alternative_else_nop_endif
824#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
825	.endm
826
827	// Move from swapper_pg_dir to tramp_pg_dir
828	.macro tramp_unmap_kernel, tmp
829	mrs	\tmp, ttbr1_el1
830	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
831	orr	\tmp, \tmp, #USER_ASID_FLAG
832	msr	ttbr1_el1, \tmp
833	/*
834	 * We avoid running the post_ttbr_update_workaround here because
835	 * it's only needed by Cavium ThunderX, which requires KPTI to be
836	 * disabled.
837	 */
838	.endm
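tramp_map_kernel and tramp_unmap_kernel flip TTBR1_EL1 between tramp_pg_dir and swapper_pg_dir by adding or subtracting a fixed offset and toggling the user ASID bit. A hedged C sketch of that arithmetic; both constants are symbolic stand-ins for the kernel's definitions.

#include <stdint.h>

extern const uint64_t TRAMP_SWAPPER_OFFSET;	/* distance between tramp_pg_dir and swapper_pg_dir */
extern const uint64_t USER_ASID_FLAG;		/* marks the userspace half of the ASID pair */

static uint64_t tramp_map_kernel_ttbr1(uint64_t ttbr1)
{
	/* tramp_pg_dir -> swapper_pg_dir, and drop the user ASID bit. */
	return (ttbr1 + TRAMP_SWAPPER_OFFSET) & ~USER_ASID_FLAG;
}

static uint64_t tramp_unmap_kernel_ttbr1(uint64_t ttbr1)
{
	/* swapper_pg_dir -> tramp_pg_dir, and set the user ASID bit again. */
	return (ttbr1 - TRAMP_SWAPPER_OFFSET) | USER_ASID_FLAG;
}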
839
840	.macro tramp_ventry, regsize = 64
841	.align	7
8421:
843	.if	\regsize == 64
844	msr	tpidrro_el0, x30	// Restored in kernel_ventry
845	.endif
846	/*
847	 * Defend against branch aliasing attacks by pushing a dummy
848	 * entry onto the return stack and using a RET instruction to
849	 * enter the full-fat kernel vectors.
850	 */
851	bl	2f
852	b	.
8532:
854	tramp_map_kernel	x30
855#ifdef CONFIG_RANDOMIZE_BASE
856	adr	x30, tramp_vectors + PAGE_SIZE
857alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
858	ldr	x30, [x30]
859#else
860	ldr	x30, =vectors
861#endif
862alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
863	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
864alternative_else_nop_endif
865	msr	vbar_el1, x30
866	add	x30, x30, #(1b - tramp_vectors)
867	isb
868	ret
869	.endm
870
871	.macro tramp_exit, regsize = 64
872	adr	x30, tramp_vectors
873	msr	vbar_el1, x30
874	tramp_unmap_kernel	x30
875	.if	\regsize == 64
876	mrs	x30, far_el1
877	.endif
878	eret
879	sb
880	.endm
881
882	.align	11
883SYM_CODE_START_NOALIGN(tramp_vectors)
884	.space	0x400
885
886	tramp_ventry
887	tramp_ventry
888	tramp_ventry
889	tramp_ventry
890
891	tramp_ventry	32
892	tramp_ventry	32
893	tramp_ventry	32
894	tramp_ventry	32
895SYM_CODE_END(tramp_vectors)
896
897SYM_CODE_START(tramp_exit_native)
898	tramp_exit
899SYM_CODE_END(tramp_exit_native)
900
901SYM_CODE_START(tramp_exit_compat)
902	tramp_exit	32
903SYM_CODE_END(tramp_exit_compat)
904
905	.ltorg
906	.popsection				// .entry.tramp.text
907#ifdef CONFIG_RANDOMIZE_BASE
908	.pushsection ".rodata", "a"
909	.align PAGE_SHIFT
910SYM_DATA_START(__entry_tramp_data_start)
911	.quad	vectors
912SYM_DATA_END(__entry_tramp_data_start)
913	.popsection				// .rodata
914#endif /* CONFIG_RANDOMIZE_BASE */
915#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
916
917/*
918 * Register switch for AArch64. The callee-saved registers need to be saved
919 * and restored. On entry:
920 *   x0 = previous task_struct (must be preserved across the switch)
921 *   x1 = next task_struct
922 * Previous and next are guaranteed not to be the same.
923 *
924 */
925SYM_FUNC_START(cpu_switch_to)
926	mov	x10, #THREAD_CPU_CONTEXT
927	add	x8, x0, x10
928	mov	x9, sp
929	stp	x19, x20, [x8], #16		// store callee-saved registers
930	stp	x21, x22, [x8], #16
931	stp	x23, x24, [x8], #16
932	stp	x25, x26, [x8], #16
933	stp	x27, x28, [x8], #16
934	stp	x29, x9, [x8], #16
935	str	lr, [x8]
936	add	x8, x1, x10
937	ldp	x19, x20, [x8], #16		// restore callee-saved registers
938	ldp	x21, x22, [x8], #16
939	ldp	x23, x24, [x8], #16
940	ldp	x25, x26, [x8], #16
941	ldp	x27, x28, [x8], #16
942	ldp	x29, x9, [x8], #16
943	ldr	lr, [x8]
944	mov	sp, x9
945	msr	sp_el0, x1
946	ptrauth_keys_install_kernel x1, x8, x9, x10
947	scs_save x0, x8
948	scs_load x1, x8
949	ret
950SYM_FUNC_END(cpu_switch_to)
951NOKPROBE(cpu_switch_to)
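cpu_switch_to saves x19-x28, fp, sp and lr into the previous task's context record and loads the next task's, so the ret lands on the new task's stack at its saved lr. A simplified C view of the record those stp/ldp pairs walk through; the field order mirrors the stores, but this is an illustration, not the kernel's struct definition.

#include <stdint.h>

struct cpu_context_sketch {
	uint64_t x19, x20;
	uint64_t x21, x22;
	uint64_t x23, x24;
	uint64_t x25, x26;
	uint64_t x27, x28;
	uint64_t fp, sp;	/* x29 and the kernel stack pointer */
	uint64_t pc;		/* saved lr: where the next task resumes */
};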
952
953/*
954 * This is how we return from a fork.
955 */
956SYM_CODE_START(ret_from_fork)
957	bl	schedule_tail
958	cbz	x19, 1f				// not a kernel thread
959	mov	x0, x20
960	blr	x19
9611:	get_current_task tsk
962	b	ret_to_user
963SYM_CODE_END(ret_from_fork)
964NOKPROBE(ret_from_fork)
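ret_from_fork distinguishes kernel threads from user forks by x19: non-zero means "call this function with x20 as its argument", zero means fall straight through to ret_to_user. A hedged C sketch of that dispatch; the stub names are illustrative, and fn/arg mirror the callee-saved x19/x20 set up on the fork path.

#include <stdint.h>

extern void schedule_tail_sketch(void *prev);	/* stand-in for the schedule_tail call */
extern void return_to_user_sketch(void);	/* stand-in for the ret_to_user path */

static void ret_from_fork_sketch(void *prev, void (*fn)(void *), void *arg)
{
	schedule_tail_sketch(prev);	/* bl schedule_tail */
	if (fn)				/* cbz x19, 1f: zero means not a kernel thread */
		fn(arg);		/* kernel thread: blr x19 with x0 = x20 */
	return_to_user_sketch();	/* 1: get_current_task tsk; b ret_to_user */
}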
965
966#ifdef CONFIG_ARM_SDE_INTERFACE
967
968#include <asm/sdei.h>
969#include <uapi/linux/arm_sdei.h>
970
971.macro sdei_handler_exit exit_mode
972	/* On success, this call never returns... */
973	cmp	\exit_mode, #SDEI_EXIT_SMC
974	b.ne	99f
975	smc	#0
976	b	.
97799:	hvc	#0
978	b	.
979.endm
980
981#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
982/*
983 * The regular SDEI entry point may have been unmapped along with the rest of
984 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
985 * argument accessible.
986 *
987 * This clobbers x4; __sdei_handler() will restore it from firmware's
988 * copy.
989 */
990.ltorg
991.pushsection ".entry.tramp.text", "ax"
992SYM_CODE_START(__sdei_asm_entry_trampoline)
993	mrs	x4, ttbr1_el1
994	tbz	x4, #USER_ASID_BIT, 1f
995
996	tramp_map_kernel tmp=x4
997	isb
998	mov	x4, xzr
999
1000	/*
1001	 * Remember whether to unmap the kernel on exit.
1002	 */
10031:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
1004
1005#ifdef CONFIG_RANDOMIZE_BASE
1006	adr	x4, tramp_vectors + PAGE_SIZE
1007	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1008	ldr	x4, [x4]
1009#else
1010	ldr	x4, =__sdei_asm_handler
1011#endif
1012	br	x4
1013SYM_CODE_END(__sdei_asm_entry_trampoline)
1014NOKPROBE(__sdei_asm_entry_trampoline)
1015
1016/*
1017 * Make the exit call and restore the original ttbr1_el1
1018 *
1019 * x0 & x1: setup for the exit API call
1020 * x2: exit_mode
1021 * x4: struct sdei_registered_event argument from registration time.
1022 */
1023SYM_CODE_START(__sdei_asm_exit_trampoline)
1024	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
1025	cbnz	x4, 1f
1026
1027	tramp_unmap_kernel	tmp=x4
1028
10291:	sdei_handler_exit exit_mode=x2
1030SYM_CODE_END(__sdei_asm_exit_trampoline)
1031NOKPROBE(__sdei_asm_exit_trampoline)
1032	.ltorg
1033.popsection		// .entry.tramp.text
1034#ifdef CONFIG_RANDOMIZE_BASE
1035.pushsection ".rodata", "a"
1036SYM_DATA_START(__sdei_asm_trampoline_next_handler)
1037	.quad	__sdei_asm_handler
1038SYM_DATA_END(__sdei_asm_trampoline_next_handler)
1039.popsection		// .rodata
1040#endif /* CONFIG_RANDOMIZE_BASE */
1041#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1042
1043/*
1044 * Software Delegated Exception entry point.
1045 *
1046 * x0: Event number
1047 * x1: struct sdei_registered_event argument from registration time.
1048 * x2: interrupted PC
1049 * x3: interrupted PSTATE
1050 * x4: maybe clobbered by the trampoline
1051 *
1052 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1053 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1054 * want them.
1055 */
1056SYM_CODE_START(__sdei_asm_handler)
1057	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1058	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1059	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1060	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1061	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1062	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1063	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1064	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1065	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1066	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1067	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1068	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1069	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1070	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1071	mov	x4, sp
1072	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1073
1074	mov	x19, x1
1075
1076#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1077	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1078#endif
1079
1080#ifdef CONFIG_VMAP_STACK
1081	/*
1082	 * entry.S may have been using sp as a scratch register; find whether
1083	 * this is a normal or critical event and switch to the appropriate
1084	 * stack for this CPU.
1085	 */
1086	cbnz	w4, 1f
1087	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1088	b	2f
10891:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
10902:	mov	x6, #SDEI_STACK_SIZE
1091	add	x5, x5, x6
1092	mov	sp, x5
1093#endif
1094
1095#ifdef CONFIG_SHADOW_CALL_STACK
1096	/* Use a separate shadow call stack for normal and critical events */
1097	cbnz	w4, 3f
1098	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1099	b	4f
11003:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
11014:
1102#endif
1103
1104	/*
1105	 * We may have interrupted userspace, or a guest, or exit-from or
1106	 * return-to either of these. We can't trust sp_el0, so restore it.
1107	 */
1108	mrs	x28, sp_el0
1109	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1110	msr	sp_el0, x0
1111
1112	/* If we interrupted the kernel, point to the previous stack/frame. */
1113	and     x0, x3, #0xc
1114	mrs     x1, CurrentEL
1115	cmp     x0, x1
1116	csel	x29, x29, xzr, eq	// fp, or zero
1117	csel	x4, x2, xzr, eq		// elr, or zero
1118
1119	stp	x29, x4, [sp, #-16]!
1120	mov	x29, sp
1121
1122	add	x0, x19, #SDEI_EVENT_INTREGS
1123	mov	x1, x19
1124	bl	__sdei_handler
1125
1126	msr	sp_el0, x28
1127	/* restore regs >x17 that we clobbered */
1128	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1129	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1130	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1131	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1132	mov	sp, x1
1133
1134	mov	x1, x0			// address to complete_and_resume
1135	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1136	cmp	x0, #1
1137	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1138	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1139	csel	x0, x2, x3, ls
1140
1141	ldr_l	x2, sdei_exit_mode
1142
1143alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1144	sdei_handler_exit exit_mode=x2
1145alternative_else_nop_endif
1146
1147#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1148	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1149	br	x5
1150#endif
1151SYM_CODE_END(__sdei_asm_handler)
1152NOKPROBE(__sdei_asm_handler)
1153#endif /* CONFIG_ARM_SDE_INTERFACE */
1154