xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 22d55f02)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/cpufeature.h>
18#include <asm/errno.h>
19#include <asm/esr.h>
20#include <asm/irq.h>
21#include <asm/memory.h>
22#include <asm/mmu.h>
23#include <asm/processor.h>
24#include <asm/ptrace.h>
25#include <asm/thread_info.h>
26#include <asm/asm-uaccess.h>
27#include <asm/unistd.h>
28
29/*
30 * Context tracking subsystem.  Used to instrument transitions
31 * between user and kernel mode.
32 */
33	.macro ct_user_exit
34#ifdef CONFIG_CONTEXT_TRACKING
35	bl	context_tracking_user_exit
36#endif
37	.endm
38
39	.macro ct_user_enter
40#ifdef CONFIG_CONTEXT_TRACKING
41	bl	context_tracking_user_enter
42#endif
43	.endm
44
45	.macro	clear_gp_regs
46	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
47	mov	x\n, xzr
48	.endr
49	.endm
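	/*
	 * Used by kernel_entry on exceptions from EL0: once the incoming
	 * registers have been saved to pt_regs, the live GPRs are zeroed so
	 * that stale user-controlled values are not left live while running
	 * kernel code (for instance as speculation-gadget inputs).
	 */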
50
51/*
52 * Bad Abort numbers
53 *-----------------
54 */
55#define BAD_SYNC	0
56#define BAD_IRQ		1
57#define BAD_FIQ		2
58#define BAD_ERROR	3
59
60	.macro kernel_ventry, el, label, regsize = 64
61	.align 7
62#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
63alternative_if ARM64_UNMAP_KERNEL_AT_EL0
64	.if	\el == 0
65	.if	\regsize == 64
66	mrs	x30, tpidrro_el0
67	msr	tpidrro_el0, xzr
68	.else
69	mov	x30, xzr
70	.endif
71	.endif
72alternative_else_nop_endif
73#endif
74
75	sub	sp, sp, #S_FRAME_SIZE
76#ifdef CONFIG_VMAP_STACK
77	/*
78	 * Test whether the SP has overflowed, without corrupting a GPR.
79	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
80	 */
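	/*
	 * Rough worked example, assuming 16KiB stacks (THREAD_SHIFT == 14)
	 * aligned to 2 * THREAD_SIZE as is usual with CONFIG_VMAP_STACK: a
	 * valid SP lies in [base, base + 16K) and has bit 14 clear, while an
	 * SP that has run up to THREAD_SIZE bytes below base has bit 14 set.
	 * The add/sub pair below recovers the decremented SP into x0 without
	 * needing a scratch register, so the tbnz can test that bit.
	 */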
81	add	sp, sp, x0			// sp' = sp + x0
82	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
83	tbnz	x0, #THREAD_SHIFT, 0f
84	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
85	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
86	b	el\()\el\()_\label
87
880:
89	/*
90	 * Either we've just detected an overflow, or we've taken an exception
91	 * while on the overflow stack. Either way, we won't return to
92	 * userspace, and can clobber EL0 registers to free up GPRs.
93	 */
94
95	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
96	msr	tpidr_el0, x0
97
98	/* Recover the original x0 value and stash it in tpidrro_el0 */
99	sub	x0, sp, x0
100	msr	tpidrro_el0, x0
101
102	/* Switch to the overflow stack */
103	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
104
105	/*
106	 * Check whether we were already on the overflow stack. This may happen
107	 * after panic() re-enables interrupts.
108	 */
109	mrs	x0, tpidr_el0			// sp of interrupted context
110	sub	x0, sp, x0			// delta with top of overflow stack
111	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
112	b.ne	__bad_stack			// no? -> bad stack pointer
113
114	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
115	sub	sp, sp, x0
116	mrs	x0, tpidrro_el0
117#endif
118	b	el\()\el\()_\label
119	.endm
120
121	.macro tramp_alias, dst, sym
122	mov_q	\dst, TRAMP_VALIAS
123	add	\dst, \dst, #(\sym - .entry.tramp.text)
124	.endm
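	// tramp_alias turns \sym (a symbol inside .entry.tramp.text) into its
	// address in the fixed TRAMP_VALIAS mapping, which stays mapped for
	// EL0 even while the rest of the kernel is unmapped by
	// CONFIG_UNMAP_KERNEL_AT_EL0, so it can be branched to on the way out.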
125
126	// This macro corrupts x0-x3. It is the caller's duty
127	// to save/restore them if required.
128	.macro	apply_ssbd, state, tmp1, tmp2
129#ifdef CONFIG_ARM64_SSBD
130alternative_cb	arm64_enable_wa2_handling
131	b	.L__asm_ssbd_skip\@
132alternative_cb_end
133	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
134	cbz	\tmp2,	.L__asm_ssbd_skip\@
135	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
136	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
137	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
138	mov	w1, #\state
139alternative_cb	arm64_update_smccc_conduit
140	nop					// Patched to SMC/HVC #0
141alternative_cb_end
142.L__asm_ssbd_skip\@:
143#endif
144	.endm
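	/*
	 * A rough sketch of the flow above: if this CPU needs the Spectre-v4
	 * (SSBD) mitigation toggled dynamically, firmware is asked to do so
	 * via ARM_SMCCC_ARCH_WORKAROUND_2 (the nop is patched into an SMC or
	 * HVC depending on the conduit). \state is 1 on kernel entry to turn
	 * the mitigation on and 0 on the way back to EL0 to turn it off;
	 * tasks with TIF_SSBD set keep their prctl-chosen state and skip the
	 * call.
	 */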
145
146	.macro	kernel_entry, el, regsize = 64
147	.if	\regsize == 32
148	mov	w0, w0				// zero upper 32 bits of x0
149	.endif
150	stp	x0, x1, [sp, #16 * 0]
151	stp	x2, x3, [sp, #16 * 1]
152	stp	x4, x5, [sp, #16 * 2]
153	stp	x6, x7, [sp, #16 * 3]
154	stp	x8, x9, [sp, #16 * 4]
155	stp	x10, x11, [sp, #16 * 5]
156	stp	x12, x13, [sp, #16 * 6]
157	stp	x14, x15, [sp, #16 * 7]
158	stp	x16, x17, [sp, #16 * 8]
159	stp	x18, x19, [sp, #16 * 9]
160	stp	x20, x21, [sp, #16 * 10]
161	stp	x22, x23, [sp, #16 * 11]
162	stp	x24, x25, [sp, #16 * 12]
163	stp	x26, x27, [sp, #16 * 13]
164	stp	x28, x29, [sp, #16 * 14]
165
166	.if	\el == 0
167	clear_gp_regs
168	mrs	x21, sp_el0
169	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
170	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
171	disable_step_tsk x19, x20		// exceptions when scheduling.
172
173	apply_ssbd 1, x22, x23
174
175	.else
176	add	x21, sp, #S_FRAME_SIZE
177	get_current_task tsk
178	/* Save the task's original addr_limit and set USER_DS */
179	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
180	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
181	mov	x20, #USER_DS
182	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
183	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
184	.endif /* \el == 0 */
185	mrs	x22, elr_el1
186	mrs	x23, spsr_el1
187	stp	lr, x21, [sp, #S_LR]
188
189	/*
190	 * In order to be able to dump the contents of struct pt_regs at the
191	 * time the exception was taken (in case we attempt to walk the call
192	 * stack later), chain it together with the stack frames.
193	 */
194	.if \el == 0
195	stp	xzr, xzr, [sp, #S_STACKFRAME]
196	.else
197	stp	x29, x22, [sp, #S_STACKFRAME]
198	.endif
199	add	x29, sp, #S_STACKFRAME
200
201#ifdef CONFIG_ARM64_SW_TTBR0_PAN
202	/*
203	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
204	 * EL0, there is no need to check the state of TTBR0_EL1 since
205	 * accesses are always enabled.
206	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
207	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
208	 * user mappings.
209	 */
210alternative_if ARM64_HAS_PAN
211	b	1f				// skip TTBR0 PAN
212alternative_else_nop_endif
213
214	.if	\el != 0
215	mrs	x21, ttbr0_el1
216	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
217	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
218	b.eq	1f				// TTBR0 access already disabled
219	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
220	.endif
221
222	__uaccess_ttbr0_disable x21
2231:
224#endif
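	/*
	 * In short: on exceptions from EL1 the code above records, in the
	 * saved SPSR's PSR_PAN_BIT, whether TTBR0_EL1 was already pointing at
	 * the reserved tables (user access disabled), then disables TTBR0
	 * walks so stray user accesses fault; kernel_exit uses that bit to
	 * decide whether to re-enable them. CPUs with real ARMv8.1 PAN skip
	 * all of this via the alternative above.
	 */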
225
226	stp	x22, x23, [sp, #S_PC]
227
228	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
229	.if	\el == 0
230	mov	w21, #NO_SYSCALL
231	str	w21, [sp, #S_SYSCALLNO]
232	.endif
233
234	/*
235	 * Set sp_el0 to current thread_info.
236	 */
237	.if	\el == 0
238	msr	sp_el0, tsk
239	.endif
240
241	/* Save pmr */
242alternative_if ARM64_HAS_IRQ_PRIO_MASKING
243	mrs_s	x20, SYS_ICC_PMR_EL1
244	str	x20, [sp, #S_PMR_SAVE]
245alternative_else_nop_endif
246
247	/*
248	 * Registers that may be useful after this macro is invoked:
249	 *
250	 * x21 - aborted SP
251	 * x22 - aborted PC
252	 * x23 - aborted PSTATE
253	 */
254	.endm
255
256	.macro	kernel_exit, el
257	.if	\el != 0
258	disable_daif
259
260	/* Restore the task's original addr_limit. */
261	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
262	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
263
264	/* No need to restore UAO, it will be restored from SPSR_EL1 */
265	.endif
266
267	/* Restore pmr */
268alternative_if ARM64_HAS_IRQ_PRIO_MASKING
269	ldr	x20, [sp, #S_PMR_SAVE]
270	msr_s	SYS_ICC_PMR_EL1, x20
271	/* Ensure priority change is seen by redistributor */
272	dsb	sy
273alternative_else_nop_endif
274
275	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
276	.if	\el == 0
277	ct_user_enter
278	.endif
279
280#ifdef CONFIG_ARM64_SW_TTBR0_PAN
281	/*
282	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
283	 * PAN bit checking.
284	 */
285alternative_if ARM64_HAS_PAN
286	b	2f				// skip TTBR0 PAN
287alternative_else_nop_endif
288
289	.if	\el != 0
290	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
291	.endif
292
293	__uaccess_ttbr0_enable x0, x1
294
295	.if	\el == 0
296	/*
297	 * Enable errata workarounds only if returning to user. The only
298	 * workaround currently required for TTBR0_EL1 changes are for the
299	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
300	 * corruption).
301	 */
302	bl	post_ttbr_update_workaround
303	.endif
3041:
305	.if	\el != 0
306	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
307	.endif
3082:
309#endif
310
311	.if	\el == 0
312	ldr	x23, [sp, #S_SP]		// load return stack pointer
313	msr	sp_el0, x23
314	tst	x22, #PSR_MODE32_BIT		// native task?
315	b.eq	3f
316
317#ifdef CONFIG_ARM64_ERRATUM_845719
318alternative_if ARM64_WORKAROUND_845719
319#ifdef CONFIG_PID_IN_CONTEXTIDR
320	mrs	x29, contextidr_el1
321	msr	contextidr_el1, x29
322#else
323	msr contextidr_el1, xzr
324#endif
325alternative_else_nop_endif
326#endif
3273:
328#ifdef CONFIG_ARM64_ERRATUM_1418040
329alternative_if_not ARM64_WORKAROUND_1418040
330	b	4f
331alternative_else_nop_endif
332	/*
333	 * if (x22.mode32 == cntkctl_el1.el0vcten)
334	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
335	 */
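	/*
	 * Bit arithmetic (assuming the usual encodings): PSR_MODE32_BIT is
	 * bit 4 of the saved PSTATE in x22, so "lsr #3" lines it up with
	 * EL0VCTEN, bit 1 of CNTKCTL_EL1. eon (exclusive NOR) sets bit 1 of
	 * x0 only when the two bits are equal, which is exactly when the
	 * toggle below is required; otherwise the tbz skips it.
	 */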
336	mrs	x1, cntkctl_el1
337	eon	x0, x1, x22, lsr #3
338	tbz	x0, #1, 4f
339	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
340	msr	cntkctl_el1, x1
3414:
342#endif
343	apply_ssbd 0, x0, x1
344	.endif
345
346	msr	elr_el1, x21			// set up the return data
347	msr	spsr_el1, x22
348	ldp	x0, x1, [sp, #16 * 0]
349	ldp	x2, x3, [sp, #16 * 1]
350	ldp	x4, x5, [sp, #16 * 2]
351	ldp	x6, x7, [sp, #16 * 3]
352	ldp	x8, x9, [sp, #16 * 4]
353	ldp	x10, x11, [sp, #16 * 5]
354	ldp	x12, x13, [sp, #16 * 6]
355	ldp	x14, x15, [sp, #16 * 7]
356	ldp	x16, x17, [sp, #16 * 8]
357	ldp	x18, x19, [sp, #16 * 9]
358	ldp	x20, x21, [sp, #16 * 10]
359	ldp	x22, x23, [sp, #16 * 11]
360	ldp	x24, x25, [sp, #16 * 12]
361	ldp	x26, x27, [sp, #16 * 13]
362	ldp	x28, x29, [sp, #16 * 14]
363	ldr	lr, [sp, #S_LR]
364	add	sp, sp, #S_FRAME_SIZE		// restore sp
365
366	.if	\el == 0
367alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
368#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
369	bne	5f
370	msr	far_el1, x30
371	tramp_alias	x30, tramp_exit_native
372	br	x30
3735:
374	tramp_alias	x30, tramp_exit_compat
375	br	x30
376#endif
377	.else
378	eret
379	.endif
380	sb
381	.endm
382
383	.macro	irq_stack_entry
384	mov	x19, sp			// preserve the original sp
385
386	/*
387	 * Compare sp with the base of the task stack.
388	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
389	 * and should switch to the irq stack.
390	 */
391	ldr	x25, [tsk, TSK_STACK]
392	eor	x25, x25, x19
393	and	x25, x25, #~(THREAD_SIZE - 1)
394	cbnz	x25, 9998f
395
396	ldr_this_cpu x25, irq_stack_ptr, x26
397	mov	x26, #IRQ_STACK_SIZE
398	add	x26, x25, x26
399
400	/* switch to the irq stack */
401	mov	sp, x26
4029998:
403	.endm
404
405	/*
406	 * x19 should be preserved between irq_stack_entry and
407	 * irq_stack_exit.
408	 */
409	.macro	irq_stack_exit
410	mov	sp, x19
411	.endm
412
413/* GPRs used by entry code */
414tsk	.req	x28		// current thread_info
415
416/*
417 * Interrupt handling.
418 */
419	.macro	irq_handler
420	ldr_l	x1, handle_arch_irq
421	mov	x0, sp
422	irq_stack_entry
423	blr	x1
424	irq_stack_exit
425	.endm
426
427	.text
428
429/*
430 * Exception vectors.
431 */
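/*
 * The table follows the fixed ARMv8 layout: four groups of four 128-byte
 * slots (hence the .align 11 base and the .align 7 in kernel_ventry), for
 * exceptions taken from the current EL with SP_EL0, the current EL with
 * SP_ELx, a lower EL in AArch64 and a lower EL in AArch32 respectively.
 */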
432	.pushsection ".entry.text", "ax"
433
434	.align	11
435ENTRY(vectors)
436	kernel_ventry	1, sync_invalid			// Synchronous EL1t
437	kernel_ventry	1, irq_invalid			// IRQ EL1t
438	kernel_ventry	1, fiq_invalid			// FIQ EL1t
439	kernel_ventry	1, error_invalid		// Error EL1t
440
441	kernel_ventry	1, sync				// Synchronous EL1h
442	kernel_ventry	1, irq				// IRQ EL1h
443	kernel_ventry	1, fiq_invalid			// FIQ EL1h
444	kernel_ventry	1, error			// Error EL1h
445
446	kernel_ventry	0, sync				// Synchronous 64-bit EL0
447	kernel_ventry	0, irq				// IRQ 64-bit EL0
448	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
449	kernel_ventry	0, error			// Error 64-bit EL0
450
451#ifdef CONFIG_COMPAT
452	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
453	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
454	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
455	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
456#else
457	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
458	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
459	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
460	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
461#endif
462END(vectors)
463
464#ifdef CONFIG_VMAP_STACK
465	/*
466	 * We detected an overflow in kernel_ventry, which switched to the
467	 * overflow stack. Stash the exception regs, and head to our overflow
468	 * handler.
469	 */
470__bad_stack:
471	/* Restore the original x0 value */
472	mrs	x0, tpidrro_el0
473
474	/*
475	 * Store the original GPRs to the new stack. The original SP (minus
476	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
477	 */
478	sub	sp, sp, #S_FRAME_SIZE
479	kernel_entry 1
480	mrs	x0, tpidr_el0
481	add	x0, x0, #S_FRAME_SIZE
482	str	x0, [sp, #S_SP]
483
484	/* Stash the regs for handle_bad_stack */
485	mov	x0, sp
486
487	/* Time to die */
488	bl	handle_bad_stack
489	ASM_BUG()
490#endif /* CONFIG_VMAP_STACK */
491
492/*
493 * Invalid mode handlers
494 */
495	.macro	inv_entry, el, reason, regsize = 64
496	kernel_entry \el, \regsize
497	mov	x0, sp
498	mov	x1, #\reason
499	mrs	x2, esr_el1
500	bl	bad_mode
501	ASM_BUG()
502	.endm
503
504el0_sync_invalid:
505	inv_entry 0, BAD_SYNC
506ENDPROC(el0_sync_invalid)
507
508el0_irq_invalid:
509	inv_entry 0, BAD_IRQ
510ENDPROC(el0_irq_invalid)
511
512el0_fiq_invalid:
513	inv_entry 0, BAD_FIQ
514ENDPROC(el0_fiq_invalid)
515
516el0_error_invalid:
517	inv_entry 0, BAD_ERROR
518ENDPROC(el0_error_invalid)
519
520#ifdef CONFIG_COMPAT
521el0_fiq_invalid_compat:
522	inv_entry 0, BAD_FIQ, 32
523ENDPROC(el0_fiq_invalid_compat)
524#endif
525
526el1_sync_invalid:
527	inv_entry 1, BAD_SYNC
528ENDPROC(el1_sync_invalid)
529
530el1_irq_invalid:
531	inv_entry 1, BAD_IRQ
532ENDPROC(el1_irq_invalid)
533
534el1_fiq_invalid:
535	inv_entry 1, BAD_FIQ
536ENDPROC(el1_fiq_invalid)
537
538el1_error_invalid:
539	inv_entry 1, BAD_ERROR
540ENDPROC(el1_error_invalid)
541
542/*
543 * EL1 mode handlers.
544 */
545	.align	6
546el1_sync:
547	kernel_entry 1
548	mrs	x1, esr_el1			// read the syndrome register
549	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
550	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
551	b.eq	el1_da
552	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
553	b.eq	el1_ia
554	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
555	b.eq	el1_undef
556	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
557	b.eq	el1_sp_pc
558	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
559	b.eq	el1_sp_pc
560	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
561	b.eq	el1_undef
562	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
563	b.ge	el1_dbg
564	b	el1_inv
565
566el1_ia:
567	/*
568	 * Fall through to the Data abort case
569	 */
570el1_da:
571	/*
572	 * Data abort handling
573	 */
574	mrs	x3, far_el1
575	inherit_daif	pstate=x23, tmp=x2
576	clear_address_tag x0, x3
577	mov	x2, sp				// struct pt_regs
578	bl	do_mem_abort
579
580	kernel_exit 1
581el1_sp_pc:
582	/*
583	 * Stack or PC alignment exception handling
584	 */
585	mrs	x0, far_el1
586	inherit_daif	pstate=x23, tmp=x2
587	mov	x2, sp
588	bl	do_sp_pc_abort
589	ASM_BUG()
590el1_undef:
591	/*
592	 * Undefined instruction
593	 */
594	inherit_daif	pstate=x23, tmp=x2
595	mov	x0, sp
596	bl	do_undefinstr
597	kernel_exit 1
598el1_dbg:
599	/*
600	 * Debug exception handling
601	 */
602	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
603	cinc	x24, x24, eq			// set bit '0'
604	tbz	x24, #0, el1_inv		// EL1 only
605	mrs	x0, far_el1
606	mov	x2, sp				// struct pt_regs
607	bl	do_debug_exception
608	kernel_exit 1
609el1_inv:
610	// TODO: add support for undefined instructions in kernel mode
611	inherit_daif	pstate=x23, tmp=x2
612	mov	x0, sp
613	mov	x2, x1
614	mov	x1, #BAD_SYNC
615	bl	bad_mode
616	ASM_BUG()
617ENDPROC(el1_sync)
618
619	.align	6
620el1_irq:
621	kernel_entry 1
622	enable_da_f
623#ifdef CONFIG_TRACE_IRQFLAGS
624#ifdef CONFIG_ARM64_PSEUDO_NMI
625alternative_if ARM64_HAS_IRQ_PRIO_MASKING
626	ldr	x20, [sp, #S_PMR_SAVE]
627alternative_else
628	mov	x20, #GIC_PRIO_IRQON
629alternative_endif
630	cmp	x20, #GIC_PRIO_IRQOFF
631	/* IRQs were disabled, don't trace */
632	b.ls	1f
633#endif
634	bl	trace_hardirqs_off
6351:
636#endif
637
638	irq_handler
639
640#ifdef CONFIG_PREEMPT
641	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
642alternative_if ARM64_HAS_IRQ_PRIO_MASKING
643	/*
644	 * DA_F were cleared at the start of handling. If anything is set in
645	 * DAIF, we are returning from an NMI, so skip preemption.
646	 */
647	mrs	x0, daif
648	orr	x24, x24, x0
649alternative_else_nop_endif
650	cbnz	x24, 1f				// preempt count != 0 || NMI return path
651	bl	preempt_schedule_irq		// irq en/disable is done inside
6521:
653#endif
654#ifdef CONFIG_TRACE_IRQFLAGS
655#ifdef CONFIG_ARM64_PSEUDO_NMI
656	/*
657	 * If IRQs were disabled when we received the interrupt, we have an NMI
658	 * and we are not re-enabling interrupts upon eret. Skip tracing.
659	 */
660	cmp	x20, #GIC_PRIO_IRQOFF
661	b.ls	1f
662#endif
663	bl	trace_hardirqs_on
6641:
665#endif
666
667	kernel_exit 1
668ENDPROC(el1_irq)
669
670/*
671 * EL0 mode handlers.
672 */
673	.align	6
674el0_sync:
675	kernel_entry 0
676	mrs	x25, esr_el1			// read the syndrome register
677	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
678	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
679	b.eq	el0_svc
680	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
681	b.eq	el0_da
682	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
683	b.eq	el0_ia
684	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
685	b.eq	el0_fpsimd_acc
686	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
687	b.eq	el0_sve_acc
688	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
689	b.eq	el0_fpsimd_exc
690	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
691	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
692	b.eq	el0_sys
693	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
694	b.eq	el0_sp_pc
695	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
696	b.eq	el0_sp_pc
697	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
698	b.eq	el0_undef
699	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
700	b.ge	el0_dbg
701	b	el0_inv
702
703#ifdef CONFIG_COMPAT
704	.align	6
705el0_sync_compat:
706	kernel_entry 0, 32
707	mrs	x25, esr_el1			// read the syndrome register
708	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
709	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
710	b.eq	el0_svc_compat
711	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
712	b.eq	el0_da
713	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
714	b.eq	el0_ia
715	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
716	b.eq	el0_fpsimd_acc
717	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
718	b.eq	el0_fpsimd_exc
719	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
720	b.eq	el0_sp_pc
721	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
722	b.eq	el0_undef
723	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
724	b.eq	el0_cp15
725	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
726	b.eq	el0_cp15
727	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
728	b.eq	el0_undef
729	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
730	b.eq	el0_undef
731	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
732	b.eq	el0_undef
733	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
734	b.ge	el0_dbg
735	b	el0_inv
736el0_svc_compat:
737	mov	x0, sp
738	bl	el0_svc_compat_handler
739	b	ret_to_user
740
741	.align	6
742el0_irq_compat:
743	kernel_entry 0, 32
744	b	el0_irq_naked
745
746el0_error_compat:
747	kernel_entry 0, 32
748	b	el0_error_naked
749
750el0_cp15:
751	/*
752	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
753	 */
754	enable_daif
755	ct_user_exit
756	mov	x0, x25
757	mov	x1, sp
758	bl	do_cp15instr
759	b	ret_to_user
760#endif
761
762el0_da:
763	/*
764	 * Data abort handling
765	 */
766	mrs	x26, far_el1
767	enable_daif
768	ct_user_exit
769	clear_address_tag x0, x26
770	mov	x1, x25
771	mov	x2, sp
772	bl	do_mem_abort
773	b	ret_to_user
774el0_ia:
775	/*
776	 * Instruction abort handling
777	 */
778	mrs	x26, far_el1
779	enable_da_f
780#ifdef CONFIG_TRACE_IRQFLAGS
781	bl	trace_hardirqs_off
782#endif
783	ct_user_exit
784	mov	x0, x26
785	mov	x1, x25
786	mov	x2, sp
787	bl	do_el0_ia_bp_hardening
788	b	ret_to_user
789el0_fpsimd_acc:
790	/*
791	 * Floating Point or Advanced SIMD access
792	 */
793	enable_daif
794	ct_user_exit
795	mov	x0, x25
796	mov	x1, sp
797	bl	do_fpsimd_acc
798	b	ret_to_user
799el0_sve_acc:
800	/*
801	 * Scalable Vector Extension access
802	 */
803	enable_daif
804	ct_user_exit
805	mov	x0, x25
806	mov	x1, sp
807	bl	do_sve_acc
808	b	ret_to_user
809el0_fpsimd_exc:
810	/*
811	 * Floating Point, Advanced SIMD or SVE exception
812	 */
813	enable_daif
814	ct_user_exit
815	mov	x0, x25
816	mov	x1, sp
817	bl	do_fpsimd_exc
818	b	ret_to_user
819el0_sp_pc:
820	/*
821	 * Stack or PC alignment exception handling
822	 */
823	mrs	x26, far_el1
824	enable_da_f
825#ifdef CONFIG_TRACE_IRQFLAGS
826	bl	trace_hardirqs_off
827#endif
828	ct_user_exit
829	mov	x0, x26
830	mov	x1, x25
831	mov	x2, sp
832	bl	do_sp_pc_abort
833	b	ret_to_user
834el0_undef:
835	/*
836	 * Undefined instruction
837	 */
838	enable_daif
839	ct_user_exit
840	mov	x0, sp
841	bl	do_undefinstr
842	b	ret_to_user
843el0_sys:
844	/*
845	 * System instructions, for trapped cache maintenance instructions
846	 */
847	enable_daif
848	ct_user_exit
849	mov	x0, x25
850	mov	x1, sp
851	bl	do_sysinstr
852	b	ret_to_user
853el0_dbg:
854	/*
855	 * Debug exception handling
856	 */
857	tbnz	x24, #0, el0_inv		// EL0 only
858	mrs	x0, far_el1
859	mov	x1, x25
860	mov	x2, sp
861	bl	do_debug_exception
862	enable_daif
863	ct_user_exit
864	b	ret_to_user
865el0_inv:
866	enable_daif
867	ct_user_exit
868	mov	x0, sp
869	mov	x1, #BAD_SYNC
870	mov	x2, x25
871	bl	bad_el0_sync
872	b	ret_to_user
873ENDPROC(el0_sync)
874
875	.align	6
876el0_irq:
877	kernel_entry 0
878el0_irq_naked:
879	enable_da_f
880#ifdef CONFIG_TRACE_IRQFLAGS
881	bl	trace_hardirqs_off
882#endif
883
884	ct_user_exit
885#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
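	/*
	 * Bit 55 of an address distinguishes the TTBR0 (user) and TTBR1
	 * (kernel) halves of the VA space, so an EL0 return address with it
	 * set looks suspicious and the branch-predictor hardening callback is
	 * invoked before the IRQ is handled.
	 */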
886	tbz	x22, #55, 1f
887	bl	do_el0_irq_bp_hardening
8881:
889#endif
890	irq_handler
891
892#ifdef CONFIG_TRACE_IRQFLAGS
893	bl	trace_hardirqs_on
894#endif
895	b	ret_to_user
896ENDPROC(el0_irq)
897
898el1_error:
899	kernel_entry 1
900	mrs	x1, esr_el1
901	enable_dbg
902	mov	x0, sp
903	bl	do_serror
904	kernel_exit 1
905ENDPROC(el1_error)
906
907el0_error:
908	kernel_entry 0
909el0_error_naked:
910	mrs	x1, esr_el1
911	enable_dbg
912	mov	x0, sp
913	bl	do_serror
914	enable_daif
915	ct_user_exit
916	b	ret_to_user
917ENDPROC(el0_error)
918
919/*
920 * Ok, we need to do extra processing, enter the slow path.
921 */
922work_pending:
923	mov	x0, sp				// 'regs'
924	bl	do_notify_resume
925#ifdef CONFIG_TRACE_IRQFLAGS
926	bl	trace_hardirqs_on		// enabled while in userspace
927#endif
928	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
929	b	finish_ret_to_user
930/*
931 * "slow" syscall return path.
932 */
933ret_to_user:
934	disable_daif
935	ldr	x1, [tsk, #TSK_TI_FLAGS]
936	and	x2, x1, #_TIF_WORK_MASK
937	cbnz	x2, work_pending
938finish_ret_to_user:
939	enable_step_tsk x1, x2
940#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
941	bl	stackleak_erase
942#endif
943	kernel_exit 0
944ENDPROC(ret_to_user)
945
946/*
947 * SVC handler.
948 */
949	.align	6
950el0_svc:
951	mov	x0, sp
952	bl	el0_svc_handler
953	b	ret_to_user
954ENDPROC(el0_svc)
955
956	.popsection				// .entry.text
957
958#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
959/*
960 * Exception vectors trampoline.
961 */
962	.pushsection ".entry.tramp.text", "ax"
963
964	.macro tramp_map_kernel, tmp
965	mrs	\tmp, ttbr1_el1
966	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
967	bic	\tmp, \tmp, #USER_ASID_FLAG
968	msr	ttbr1_el1, \tmp
969#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
970alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
971	/* ASID already in \tmp[63:48] */
972	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
973	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
974	/* 2MB boundary containing the vectors, so we nobble the walk cache */
975	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
976	isb
977	tlbi	vae1, \tmp
978	dsb	nsh
979alternative_else_nop_endif
980#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
981	.endm
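	/*
	 * A sketch of what the arithmetic above relies on: the linker script
	 * places tramp_pg_dir, reserved_ttbr0 and swapper_pg_dir back to
	 * back, so adding PAGE_SIZE + RESERVED_TTBR0_SIZE moves TTBR1_EL1
	 * from the trampoline tables to the full kernel tables, and clearing
	 * USER_ASID_FLAG selects the kernel half of the ASID pair.
	 * tramp_unmap_kernel below simply does the reverse.
	 */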
982
983	.macro tramp_unmap_kernel, tmp
984	mrs	\tmp, ttbr1_el1
985	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
986	orr	\tmp, \tmp, #USER_ASID_FLAG
987	msr	ttbr1_el1, \tmp
988	/*
989	 * We avoid running the post_ttbr_update_workaround here because
990	 * it's only needed by Cavium ThunderX, which requires KPTI to be
991	 * disabled.
992	 */
993	.endm
994
995	.macro tramp_ventry, regsize = 64
996	.align	7
9971:
998	.if	\regsize == 64
999	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1000	.endif
1001	/*
1002	 * Defend against branch aliasing attacks by pushing a dummy
1003	 * entry onto the return stack and using a RET instruction to
1004	 * enter the full-fat kernel vectors.
1005	 */
1006	bl	2f
1007	b	.
10082:
1009	tramp_map_kernel	x30
1010#ifdef CONFIG_RANDOMIZE_BASE
1011	adr	x30, tramp_vectors + PAGE_SIZE
1012alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1013	ldr	x30, [x30]
1014#else
1015	ldr	x30, =vectors
1016#endif
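	/*
	 * With CONFIG_RANDOMIZE_BASE the real address of "vectors" is not
	 * baked into the trampoline as a literal (which would leak the
	 * randomized kernel VA to anything able to read this page); it is
	 * loaded instead from __entry_tramp_data_start, mapped in the page
	 * that follows tramp_vectors.
	 */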
1017	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1018	msr	vbar_el1, x30
1019	add	x30, x30, #(1b - tramp_vectors)
1020	isb
1021	ret
1022	.endm
1023
1024	.macro tramp_exit, regsize = 64
1025	adr	x30, tramp_vectors
1026	msr	vbar_el1, x30
1027	tramp_unmap_kernel	x30
1028	.if	\regsize == 64
1029	mrs	x30, far_el1
1030	.endif
1031	eret
1032	sb
1033	.endm
1034
1035	.align	11
1036ENTRY(tramp_vectors)
1037	.space	0x400
1038
1039	tramp_ventry
1040	tramp_ventry
1041	tramp_ventry
1042	tramp_ventry
1043
1044	tramp_ventry	32
1045	tramp_ventry	32
1046	tramp_ventry	32
1047	tramp_ventry	32
1048END(tramp_vectors)
1049
1050ENTRY(tramp_exit_native)
1051	tramp_exit
1052END(tramp_exit_native)
1053
1054ENTRY(tramp_exit_compat)
1055	tramp_exit	32
1056END(tramp_exit_compat)
1057
1058	.ltorg
1059	.popsection				// .entry.tramp.text
1060#ifdef CONFIG_RANDOMIZE_BASE
1061	.pushsection ".rodata", "a"
1062	.align PAGE_SHIFT
1063	.globl	__entry_tramp_data_start
1064__entry_tramp_data_start:
1065	.quad	vectors
1066	.popsection				// .rodata
1067#endif /* CONFIG_RANDOMIZE_BASE */
1068#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1069
1070/*
1071 * Register switch for AArch64. The callee-saved registers need to be saved
1072 * and restored. On entry:
1073 *   x0 = previous task_struct (must be preserved across the switch)
1074 *   x1 = next task_struct
1075 * Previous and next are guaranteed not to be the same.
1076 *
1077 */
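/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context in task_struct;
 * the block stored and reloaded below holds the callee-saved registers
 * x19-x28 plus fp, sp and pc, which is all that has to survive a switch.
 * sp_el0 is updated last so that "current" points at the next task.
 */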
1078ENTRY(cpu_switch_to)
1079	mov	x10, #THREAD_CPU_CONTEXT
1080	add	x8, x0, x10
1081	mov	x9, sp
1082	stp	x19, x20, [x8], #16		// store callee-saved registers
1083	stp	x21, x22, [x8], #16
1084	stp	x23, x24, [x8], #16
1085	stp	x25, x26, [x8], #16
1086	stp	x27, x28, [x8], #16
1087	stp	x29, x9, [x8], #16
1088	str	lr, [x8]
1089	add	x8, x1, x10
1090	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1091	ldp	x21, x22, [x8], #16
1092	ldp	x23, x24, [x8], #16
1093	ldp	x25, x26, [x8], #16
1094	ldp	x27, x28, [x8], #16
1095	ldp	x29, x9, [x8], #16
1096	ldr	lr, [x8]
1097	mov	sp, x9
1098	msr	sp_el0, x1
1099	ret
1100ENDPROC(cpu_switch_to)
1101NOKPROBE(cpu_switch_to)
1102
1103/*
1104 * This is how we return from a fork.
1105 */
1106ENTRY(ret_from_fork)
1107	bl	schedule_tail
1108	cbz	x19, 1f				// not a kernel thread
1109	mov	x0, x20
1110	blr	x19
11111:	get_current_task tsk
1112	b	ret_to_user
1113ENDPROC(ret_from_fork)
1114NOKPROBE(ret_from_fork)
1115
1116#ifdef CONFIG_ARM_SDE_INTERFACE
1117
1118#include <asm/sdei.h>
1119#include <uapi/linux/arm_sdei.h>
1120
1121.macro sdei_handler_exit exit_mode
1122	/* On success, this call never returns... */
1123	cmp	\exit_mode, #SDEI_EXIT_SMC
1124	b.ne	99f
1125	smc	#0
1126	b	.
112799:	hvc	#0
1128	b	.
1129.endm
1130
1131#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1132/*
1133 * The regular SDEI entry point may have been unmapped along with the rest of
1134 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1135 * argument accessible.
1136 *
1137 * This clobbers x4; __sdei_handler() will restore it from firmware's
1138 * copy.
1139 */
1140.ltorg
1141.pushsection ".entry.tramp.text", "ax"
1142ENTRY(__sdei_asm_entry_trampoline)
1143	mrs	x4, ttbr1_el1
1144	tbz	x4, #USER_ASID_BIT, 1f
1145
1146	tramp_map_kernel tmp=x4
1147	isb
1148	mov	x4, xzr
1149
1150	/*
1151	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1152	 * the kernel on exit.
1153	 */
11541:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1155
1156#ifdef CONFIG_RANDOMIZE_BASE
1157	adr	x4, tramp_vectors + PAGE_SIZE
1158	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1159	ldr	x4, [x4]
1160#else
1161	ldr	x4, =__sdei_asm_handler
1162#endif
1163	br	x4
1164ENDPROC(__sdei_asm_entry_trampoline)
1165NOKPROBE(__sdei_asm_entry_trampoline)
1166
1167/*
1168 * Make the exit call and restore the original ttbr1_el1
1169 *
1170 * x0 & x1: setup for the exit API call
1171 * x2: exit_mode
1172 * x4: struct sdei_registered_event argument from registration time.
1173 */
1174ENTRY(__sdei_asm_exit_trampoline)
1175	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1176	cbnz	x4, 1f
1177
1178	tramp_unmap_kernel	tmp=x4
1179
11801:	sdei_handler_exit exit_mode=x2
1181ENDPROC(__sdei_asm_exit_trampoline)
1182NOKPROBE(__sdei_asm_exit_trampoline)
1183	.ltorg
1184.popsection		// .entry.tramp.text
1185#ifdef CONFIG_RANDOMIZE_BASE
1186.pushsection ".rodata", "a"
1187__sdei_asm_trampoline_next_handler:
1188	.quad	__sdei_asm_handler
1189.popsection		// .rodata
1190#endif /* CONFIG_RANDOMIZE_BASE */
1191#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1192
1193/*
1194 * Software Delegated Exception entry point.
1195 *
1196 * x0: Event number
1197 * x1: struct sdei_registered_event argument from registration time.
1198 * x2: interrupted PC
1199 * x3: interrupted PSTATE
1200 * x4: maybe clobbered by the trampoline
1201 *
1202 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1203 * follow the SMC Calling Convention. We save (or retrieve) all the
1204 * registers as the handler may want them.
1205 */
1206ENTRY(__sdei_asm_handler)
1207	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1208	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1209	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1210	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1211	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1212	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1213	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1214	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1215	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1216	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1217	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1218	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1219	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1220	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1221	mov	x4, sp
1222	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1223
1224	mov	x19, x1
1225
1226#ifdef CONFIG_VMAP_STACK
1227	/*
1228	 * entry.S may have been using sp as a scratch register; find whether
1229	 * this is a normal or critical event and switch to the appropriate
1230	 * stack for this CPU.
1231	 */
1232	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1233	cbnz	w4, 1f
1234	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1235	b	2f
12361:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12372:	mov	x6, #SDEI_STACK_SIZE
1238	add	x5, x5, x6
1239	mov	sp, x5
1240#endif
1241
1242	/*
1243	 * We may have interrupted userspace, or a guest, or exit-from or
1244	 * return-to either of these. We can't trust sp_el0, so restore it.
1245	 */
1246	mrs	x28, sp_el0
1247	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1248	msr	sp_el0, x0
1249
1250	/* If we interrupted the kernel, point to the previous stack/frame. */
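	/*
	 * PSTATE.M[3:2] in x3 is the EL that was interrupted and CurrentEL
	 * reports our own EL in the same bit positions, so the compare below
	 * is "did we interrupt the kernel at this EL?"; only then are the
	 * interrupted fp/elr meaningful for the frame record pushed next.
	 */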
1251	and     x0, x3, #0xc
1252	mrs     x1, CurrentEL
1253	cmp     x0, x1
1254	csel	x29, x29, xzr, eq	// fp, or zero
1255	csel	x4, x2, xzr, eq		// elr, or zero
1256
1257	stp	x29, x4, [sp, #-16]!
1258	mov	x29, sp
1259
1260	add	x0, x19, #SDEI_EVENT_INTREGS
1261	mov	x1, x19
1262	bl	__sdei_handler
1263
1264	msr	sp_el0, x28
1265	/* restore regs >x17 that we clobbered */
1266	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1267	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1268	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1269	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1270	mov	sp, x1
1271
1272	mov	x1, x0			// address to complete_and_resume
1273	/* x0 = (x0 <= 1) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME */
1274	cmp	x0, #1
1275	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1276	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1277	csel	x0, x2, x3, ls
1278
1279	ldr_l	x2, sdei_exit_mode
1280
1281alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1282	sdei_handler_exit exit_mode=x2
1283alternative_else_nop_endif
1284
1285#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1286	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1287	br	x5
1288#endif
1289ENDPROC(__sdei_asm_handler)
1290NOKPROBE(__sdei_asm_handler)
1291#endif /* CONFIG_ARM_SDE_INTERFACE */
1292