1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/arm-smccc.h>
22#include <linux/init.h>
23#include <linux/linkage.h>
24
25#include <asm/alternative.h>
26#include <asm/assembler.h>
27#include <asm/asm-offsets.h>
28#include <asm/cpufeature.h>
29#include <asm/errno.h>
30#include <asm/esr.h>
31#include <asm/irq.h>
32#include <asm/memory.h>
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/ptrace.h>
36#include <asm/thread_info.h>
37#include <asm/asm-uaccess.h>
38#include <asm/unistd.h>
39
40/*
41 * Context tracking subsystem.  Used to instrument transitions
42 * between user and kernel mode.
43 */
44	.macro ct_user_exit
45#ifdef CONFIG_CONTEXT_TRACKING
46	bl	context_tracking_user_exit
47#endif
48	.endm
49
50	.macro ct_user_enter
51#ifdef CONFIG_CONTEXT_TRACKING
52	bl	context_tracking_user_enter
53#endif
54	.endm
55
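/*
 * Zero the general purpose registers x0-x29. Invoked on kernel entry from
 * EL0, after the user registers have been saved, so that stale
 * user-controlled values are not left live in the GPRs while running
 * kernel code.
 */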
56	.macro	clear_gp_regs
57	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
58	mov	x\n, xzr
59	.endr
60	.endm
61
62/*
63 * Bad Abort numbers
64 *-----------------
65 */
66#define BAD_SYNC	0
67#define BAD_IRQ		1
68#define BAD_FIQ		2
69#define BAD_ERROR	3
70
71	.macro kernel_ventry, el, label, regsize = 64
72	.align 7
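	/*
	 * With KPTI in use, entry from EL0 bounces through the trampoline
	 * vectors, which stash the original x30 in tpidrro_el0 for 64-bit
	 * tasks (see tramp_ventry below). Recover x30 and scrub tpidrro_el0
	 * here; for 32-bit tasks x30 is simply zeroed.
	 */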
73#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
74alternative_if ARM64_UNMAP_KERNEL_AT_EL0
75	.if	\el == 0
76	.if	\regsize == 64
77	mrs	x30, tpidrro_el0
78	msr	tpidrro_el0, xzr
79	.else
80	mov	x30, xzr
81	.endif
82	.endif
83alternative_else_nop_endif
84#endif
85
86	sub	sp, sp, #S_FRAME_SIZE
87#ifdef CONFIG_VMAP_STACK
88	/*
89	 * Test whether the SP has overflowed, without corrupting a GPR.
90	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
91	 */
92	add	sp, sp, x0			// sp' = sp + x0
93	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
94	tbnz	x0, #THREAD_SHIFT, 0f
95	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
96	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
97	b	el\()\el\()_\label
98
990:
100	/*
101	 * Either we've just detected an overflow, or we've taken an exception
102	 * while on the overflow stack. Either way, we won't return to
103	 * userspace, and can clobber EL0 registers to free up GPRs.
104	 */
105
106	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
107	msr	tpidr_el0, x0
108
109	/* Recover the original x0 value and stash it in tpidrro_el0 */
110	sub	x0, sp, x0
111	msr	tpidrro_el0, x0
112
113	/* Switch to the overflow stack */
114	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
115
116	/*
117	 * Check whether we were already on the overflow stack. This may happen
118	 * after panic() re-enables interrupts.
119	 */
120	mrs	x0, tpidr_el0			// sp of interrupted context
121	sub	x0, sp, x0			// delta with top of overflow stack
122	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
123	b.ne	__bad_stack			// no? -> bad stack pointer
124
125	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
126	sub	sp, sp, x0
127	mrs	x0, tpidrro_el0
128#endif
129	b	el\()\el\()_\label
130	.endm
131
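	// Compute the address of \sym inside the trampoline's fixmap alias
	// (TRAMP_VALIAS), which stays mapped in the user page tables under
	// KPTI.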
132	.macro tramp_alias, dst, sym
133	mov_q	\dst, TRAMP_VALIAS
134	add	\dst, \dst, #(\sym - .entry.tramp.text)
135	.endm
136
137	// This macro corrupts x0-x3. It is the caller's duty
138	// to save/restore them if required.
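	//
	// Set the SSBD (Spectre-v4) mitigation state to \state by issuing
	// ARM_SMCCC_ARCH_WORKAROUND_2 through the SMC/HVC conduit patched in
	// below, unless this CPU needs no callback or the task's TIF_SSBD
	// flag indicates the per-exception toggle should be skipped.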
139	.macro	apply_ssbd, state, tmp1, tmp2
140#ifdef CONFIG_ARM64_SSBD
141alternative_cb	arm64_enable_wa2_handling
142	b	.L__asm_ssbd_skip\@
143alternative_cb_end
144	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
145	cbz	\tmp2,	.L__asm_ssbd_skip\@
146	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
147	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
148	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
149	mov	w1, #\state
150alternative_cb	arm64_update_smccc_conduit
151	nop					// Patched to SMC/HVC #0
152alternative_cb_end
153.L__asm_ssbd_skip\@:
154#endif
155	.endm
156
157	.macro	kernel_entry, el, regsize = 64
158	.if	\regsize == 32
159	mov	w0, w0				// zero upper 32 bits of x0
160	.endif
161	stp	x0, x1, [sp, #16 * 0]
162	stp	x2, x3, [sp, #16 * 1]
163	stp	x4, x5, [sp, #16 * 2]
164	stp	x6, x7, [sp, #16 * 3]
165	stp	x8, x9, [sp, #16 * 4]
166	stp	x10, x11, [sp, #16 * 5]
167	stp	x12, x13, [sp, #16 * 6]
168	stp	x14, x15, [sp, #16 * 7]
169	stp	x16, x17, [sp, #16 * 8]
170	stp	x18, x19, [sp, #16 * 9]
171	stp	x20, x21, [sp, #16 * 10]
172	stp	x22, x23, [sp, #16 * 11]
173	stp	x24, x25, [sp, #16 * 12]
174	stp	x26, x27, [sp, #16 * 13]
175	stp	x28, x29, [sp, #16 * 14]
176
177	.if	\el == 0
178	clear_gp_regs
179	mrs	x21, sp_el0
180	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
181	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
182	disable_step_tsk x19, x20		// exceptions when scheduling.
183
184	apply_ssbd 1, x22, x23
185
186	.else
187	add	x21, sp, #S_FRAME_SIZE
188	get_thread_info tsk
189	/* Save the task's original addr_limit and set USER_DS */
190	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
191	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
192	mov	x20, #USER_DS
193	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
194	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
195	.endif /* \el == 0 */
196	mrs	x22, elr_el1
197	mrs	x23, spsr_el1
198	stp	lr, x21, [sp, #S_LR]
199
200	/*
201	 * In order to be able to dump the contents of struct pt_regs at the
202	 * time the exception was taken (in case we attempt to walk the call
203	 * stack later), chain it together with the stack frames.
204	 */
205	.if \el == 0
206	stp	xzr, xzr, [sp, #S_STACKFRAME]
207	.else
208	stp	x29, x22, [sp, #S_STACKFRAME]
209	.endif
210	add	x29, sp, #S_STACKFRAME
211
212#ifdef CONFIG_ARM64_SW_TTBR0_PAN
213	/*
214	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
215	 * EL0, there is no need to check the state of TTBR0_EL1 since
216	 * accesses are always enabled.
217	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
218	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
219	 * user mappings.
220	 */
221alternative_if ARM64_HAS_PAN
222	b	1f				// skip TTBR0 PAN
223alternative_else_nop_endif
224
225	.if	\el != 0
226	mrs	x21, ttbr0_el1
227	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
228	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
229	b.eq	1f				// TTBR0 access already disabled
230	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
231	.endif
232
233	__uaccess_ttbr0_disable x21
2341:
235#endif
236
237	stp	x22, x23, [sp, #S_PC]
238
239	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
240	.if	\el == 0
241	mov	w21, #NO_SYSCALL
242	str	w21, [sp, #S_SYSCALLNO]
243	.endif
244
245	/*
246	 * Set sp_el0 to current thread_info.
247	 */
248	.if	\el == 0
249	msr	sp_el0, tsk
250	.endif
251
252	/*
253	 * Registers that may be useful after this macro is invoked:
254	 *
255	 * x21 - aborted SP
256	 * x22 - aborted PC
257	 * x23 - aborted PSTATE
258	 */
259	.endm
260
261	.macro	kernel_exit, el
262	.if	\el != 0
263	disable_daif
264
265	/* Restore the task's original addr_limit. */
266	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
267	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
268
269	/* No need to restore UAO, it will be restored from SPSR_EL1 */
270	.endif
271
272	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
273	.if	\el == 0
274	ct_user_enter
275	.endif
276
277#ifdef CONFIG_ARM64_SW_TTBR0_PAN
278	/*
279	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
280	 * PAN bit checking.
281	 */
282alternative_if ARM64_HAS_PAN
283	b	2f				// skip TTBR0 PAN
284alternative_else_nop_endif
285
286	.if	\el != 0
287	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
288	.endif
289
290	__uaccess_ttbr0_enable x0, x1
291
292	.if	\el == 0
293	/*
294	 * Enable errata workarounds only if returning to user. The only
295	 * workaround currently required for TTBR0_EL1 changes is for the
296	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
297	 * corruption).
298	 */
299	bl	post_ttbr_update_workaround
300	.endif
3011:
302	.if	\el != 0
303	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
304	.endif
3052:
306#endif
307
308	.if	\el == 0
309	ldr	x23, [sp, #S_SP]		// load return stack pointer
310	msr	sp_el0, x23
311	tst	x22, #PSR_MODE32_BIT		// native task?
312	b.eq	3f
313
314#ifdef CONFIG_ARM64_ERRATUM_845719
315alternative_if ARM64_WORKAROUND_845719
316#ifdef CONFIG_PID_IN_CONTEXTIDR
317	mrs	x29, contextidr_el1
318	msr	contextidr_el1, x29
319#else
320	msr contextidr_el1, xzr
321#endif
322alternative_else_nop_endif
323#endif
3243:
325	apply_ssbd 0, x0, x1
326	.endif
327
328	msr	elr_el1, x21			// set up the return data
329	msr	spsr_el1, x22
330	ldp	x0, x1, [sp, #16 * 0]
331	ldp	x2, x3, [sp, #16 * 1]
332	ldp	x4, x5, [sp, #16 * 2]
333	ldp	x6, x7, [sp, #16 * 3]
334	ldp	x8, x9, [sp, #16 * 4]
335	ldp	x10, x11, [sp, #16 * 5]
336	ldp	x12, x13, [sp, #16 * 6]
337	ldp	x14, x15, [sp, #16 * 7]
338	ldp	x16, x17, [sp, #16 * 8]
339	ldp	x18, x19, [sp, #16 * 9]
340	ldp	x20, x21, [sp, #16 * 10]
341	ldp	x22, x23, [sp, #16 * 11]
342	ldp	x24, x25, [sp, #16 * 12]
343	ldp	x26, x27, [sp, #16 * 13]
344	ldp	x28, x29, [sp, #16 * 14]
345	ldr	lr, [sp, #S_LR]
346	add	sp, sp, #S_FRAME_SIZE		// restore sp
347	/*
348	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
349	 * when returning from an IPI handler and when returning to user-space.
350	 */
351
352	.if	\el == 0
353alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
354#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
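	/*
	 * The flags still hold the PSR_MODE32_BIT test from above: NE means
	 * the task is 32-bit and must leave via the compat trampoline.
	 */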
355	bne	4f
356	msr	far_el1, x30
357	tramp_alias	x30, tramp_exit_native
358	br	x30
3594:
360	tramp_alias	x30, tramp_exit_compat
361	br	x30
362#endif
363	.else
364	eret
365	.endif
366	.endm
367
368	.macro	irq_stack_entry
369	mov	x19, sp			// preserve the original sp
370
371	/*
372	 * Compare sp with the base of the task stack.
373	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
374	 * and should switch to the irq stack.
375	 */
376	ldr	x25, [tsk, TSK_STACK]
377	eor	x25, x25, x19
378	and	x25, x25, #~(THREAD_SIZE - 1)
379	cbnz	x25, 9998f
380
381	ldr_this_cpu x25, irq_stack_ptr, x26
382	mov	x26, #IRQ_STACK_SIZE
383	add	x26, x25, x26
384
385	/* switch to the irq stack */
386	mov	sp, x26
3879998:
388	.endm
389
390	/*
391	 * x19 should be preserved between irq_stack_entry and
392	 * irq_stack_exit.
393	 */
394	.macro	irq_stack_exit
395	mov	sp, x19
396	.endm
397
398/*
399 * These are the registers used in the syscall handler, and allow us to
400 * have, in theory, up to 7 arguments to a function - x0 to x6.
401 *
402 * x7 is reserved for the system call number in 32-bit mode.
403 */
404wsc_nr	.req	w25		// number of system calls
405xsc_nr	.req	x25		// number of system calls (zero-extended)
406wscno	.req	w26		// syscall number
407xscno	.req	x26		// syscall number (zero-extended)
408stbl	.req	x27		// syscall table pointer
409tsk	.req	x28		// current thread_info
410
411/*
412 * Interrupt handling.
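 *
 * irq_handler calls the function installed in handle_arch_irq, switching to
 * the per-CPU IRQ stack first if we are currently on the task stack, with x0
 * pointing at the saved pt_regs.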
413 */
414	.macro	irq_handler
415	ldr_l	x1, handle_arch_irq
416	mov	x0, sp
417	irq_stack_entry
418	blr	x1
419	irq_stack_exit
420	.endm
421
422	.text
423
424/*
425 * Exception vectors.
426 */
427	.pushsection ".entry.text", "ax"
428
429	.align	11
430ENTRY(vectors)
431	kernel_ventry	1, sync_invalid			// Synchronous EL1t
432	kernel_ventry	1, irq_invalid			// IRQ EL1t
433	kernel_ventry	1, fiq_invalid			// FIQ EL1t
434	kernel_ventry	1, error_invalid		// Error EL1t
435
436	kernel_ventry	1, sync				// Synchronous EL1h
437	kernel_ventry	1, irq				// IRQ EL1h
438	kernel_ventry	1, fiq_invalid			// FIQ EL1h
439	kernel_ventry	1, error			// Error EL1h
440
441	kernel_ventry	0, sync				// Synchronous 64-bit EL0
442	kernel_ventry	0, irq				// IRQ 64-bit EL0
443	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
444	kernel_ventry	0, error			// Error 64-bit EL0
445
446#ifdef CONFIG_COMPAT
447	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
448	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
449	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
450	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
451#else
452	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
453	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
454	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
455	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
456#endif
457END(vectors)
458
459#ifdef CONFIG_VMAP_STACK
460	/*
461	 * We detected an overflow in kernel_ventry, which switched to the
462	 * overflow stack. Stash the exception regs, and head to our overflow
463	 * handler.
464	 */
465__bad_stack:
466	/* Restore the original x0 value */
467	mrs	x0, tpidrro_el0
468
469	/*
470	 * Store the original GPRs to the new stack. The original SP (minus
471	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
472	 */
473	sub	sp, sp, #S_FRAME_SIZE
474	kernel_entry 1
475	mrs	x0, tpidr_el0
476	add	x0, x0, #S_FRAME_SIZE
477	str	x0, [sp, #S_SP]
478
479	/* Stash the regs for handle_bad_stack */
480	mov	x0, sp
481
482	/* Time to die */
483	bl	handle_bad_stack
484	ASM_BUG()
485#endif /* CONFIG_VMAP_STACK */
486
487/*
488 * Invalid mode handlers
489 */
490	.macro	inv_entry, el, reason, regsize = 64
491	kernel_entry \el, \regsize
492	mov	x0, sp
493	mov	x1, #\reason
494	mrs	x2, esr_el1
495	bl	bad_mode
496	ASM_BUG()
497	.endm
498
499el0_sync_invalid:
500	inv_entry 0, BAD_SYNC
501ENDPROC(el0_sync_invalid)
502
503el0_irq_invalid:
504	inv_entry 0, BAD_IRQ
505ENDPROC(el0_irq_invalid)
506
507el0_fiq_invalid:
508	inv_entry 0, BAD_FIQ
509ENDPROC(el0_fiq_invalid)
510
511el0_error_invalid:
512	inv_entry 0, BAD_ERROR
513ENDPROC(el0_error_invalid)
514
515#ifdef CONFIG_COMPAT
516el0_fiq_invalid_compat:
517	inv_entry 0, BAD_FIQ, 32
518ENDPROC(el0_fiq_invalid_compat)
519#endif
520
521el1_sync_invalid:
522	inv_entry 1, BAD_SYNC
523ENDPROC(el1_sync_invalid)
524
525el1_irq_invalid:
526	inv_entry 1, BAD_IRQ
527ENDPROC(el1_irq_invalid)
528
529el1_fiq_invalid:
530	inv_entry 1, BAD_FIQ
531ENDPROC(el1_fiq_invalid)
532
533el1_error_invalid:
534	inv_entry 1, BAD_ERROR
535ENDPROC(el1_error_invalid)
536
537/*
538 * EL1 mode handlers.
539 */
540	.align	6
541el1_sync:
542	kernel_entry 1
543	mrs	x1, esr_el1			// read the syndrome register
544	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
545	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
546	b.eq	el1_da
547	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
548	b.eq	el1_ia
549	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
550	b.eq	el1_undef
551	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
552	b.eq	el1_sp_pc
553	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
554	b.eq	el1_sp_pc
555	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
556	b.eq	el1_undef
557	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
558	b.ge	el1_dbg
559	b	el1_inv
560
561el1_ia:
562	/*
563	 * Fall through to the Data abort case
564	 */
565el1_da:
566	/*
567	 * Data abort handling
568	 */
569	mrs	x3, far_el1
570	inherit_daif	pstate=x23, tmp=x2
571	clear_address_tag x0, x3
572	mov	x2, sp				// struct pt_regs
573	bl	do_mem_abort
574
575	kernel_exit 1
576el1_sp_pc:
577	/*
578	 * Stack or PC alignment exception handling
579	 */
580	mrs	x0, far_el1
581	inherit_daif	pstate=x23, tmp=x2
582	mov	x2, sp
583	bl	do_sp_pc_abort
584	ASM_BUG()
585el1_undef:
586	/*
587	 * Undefined instruction
588	 */
589	inherit_daif	pstate=x23, tmp=x2
590	mov	x0, sp
591	bl	do_undefinstr
592	kernel_exit 1
593el1_dbg:
594	/*
595	 * Debug exception handling
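	 *
	 * ESR ECs for debug exceptions taken from the current EL (BREAKPT_CUR,
	 * SOFTSTP_CUR, WATCHPT_CUR) have bit 0 set; BRK64 does not, so it is
	 * promoted first. Anything still missing bit 0 came from a lower EL
	 * and is invalid here.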
596	 */
597	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
598	cinc	x24, x24, eq			// set bit '0'
599	tbz	x24, #0, el1_inv		// EL1 only
600	mrs	x0, far_el1
601	mov	x2, sp				// struct pt_regs
602	bl	do_debug_exception
603	kernel_exit 1
604el1_inv:
605	// TODO: add support for undefined instructions in kernel mode
606	inherit_daif	pstate=x23, tmp=x2
607	mov	x0, sp
608	mov	x2, x1
609	mov	x1, #BAD_SYNC
610	bl	bad_mode
611	ASM_BUG()
612ENDPROC(el1_sync)
613
614	.align	6
615el1_irq:
616	kernel_entry 1
617	enable_da_f
618#ifdef CONFIG_TRACE_IRQFLAGS
619	bl	trace_hardirqs_off
620#endif
621
622	irq_handler
623
624#ifdef CONFIG_PREEMPT
625	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
626	cbnz	w24, 1f				// preempt count != 0
627	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
628	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
629	bl	el1_preempt
6301:
631#endif
632#ifdef CONFIG_TRACE_IRQFLAGS
633	bl	trace_hardirqs_on
634#endif
635	kernel_exit 1
636ENDPROC(el1_irq)
637
638#ifdef CONFIG_PREEMPT
639el1_preempt:
640	mov	x24, lr
6411:	bl	preempt_schedule_irq		// irq en/disable is done inside
642	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get the new task's TI_FLAGS
643	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
644	ret	x24
645#endif
646
647/*
648 * EL0 mode handlers.
649 */
650	.align	6
651el0_sync:
652	kernel_entry 0
653	mrs	x25, esr_el1			// read the syndrome register
654	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
655	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
656	b.eq	el0_svc
657	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
658	b.eq	el0_da
659	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
660	b.eq	el0_ia
661	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
662	b.eq	el0_fpsimd_acc
663	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
664	b.eq	el0_sve_acc
665	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
666	b.eq	el0_fpsimd_exc
667	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
668	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne	// or a WFx trap (Z forced if SYS64 matched)
669	b.eq	el0_sys
670	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
671	b.eq	el0_sp_pc
672	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
673	b.eq	el0_sp_pc
674	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
675	b.eq	el0_undef
676	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
677	b.ge	el0_dbg
678	b	el0_inv
679
680#ifdef CONFIG_COMPAT
681	.align	6
682el0_sync_compat:
683	kernel_entry 0, 32
684	mrs	x25, esr_el1			// read the syndrome register
685	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
686	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
687	b.eq	el0_svc_compat
688	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
689	b.eq	el0_da
690	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
691	b.eq	el0_ia
692	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
693	b.eq	el0_fpsimd_acc
694	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
695	b.eq	el0_fpsimd_exc
696	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
697	b.eq	el0_sp_pc
698	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
699	b.eq	el0_undef
700	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
701	b.eq	el0_cp15
702	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
703	b.eq	el0_cp15
704	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
705	b.eq	el0_undef
706	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
707	b.eq	el0_undef
708	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
709	b.eq	el0_undef
710	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
711	b.ge	el0_dbg
712	b	el0_inv
713el0_svc_compat:
714	mov	x0, sp
715	bl	el0_svc_compat_handler
716	b	ret_to_user
717
718	.align	6
719el0_irq_compat:
720	kernel_entry 0, 32
721	b	el0_irq_naked
722
723el0_error_compat:
724	kernel_entry 0, 32
725	b	el0_error_naked
726
727el0_cp15:
728	/*
729	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
730	 */
731	enable_daif
732	ct_user_exit
733	mov	x0, x25
734	mov	x1, sp
735	bl	do_cp15instr
736	b	ret_to_user
737#endif
738
739el0_da:
740	/*
741	 * Data abort handling
742	 */
743	mrs	x26, far_el1
744	enable_daif
745	ct_user_exit
746	clear_address_tag x0, x26
747	mov	x1, x25
748	mov	x2, sp
749	bl	do_mem_abort
750	b	ret_to_user
751el0_ia:
752	/*
753	 * Instruction abort handling
754	 */
755	mrs	x26, far_el1
756	enable_da_f
757#ifdef CONFIG_TRACE_IRQFLAGS
758	bl	trace_hardirqs_off
759#endif
760	ct_user_exit
761	mov	x0, x26
762	mov	x1, x25
763	mov	x2, sp
764	bl	do_el0_ia_bp_hardening
765	b	ret_to_user
766el0_fpsimd_acc:
767	/*
768	 * Floating Point or Advanced SIMD access
769	 */
770	enable_daif
771	ct_user_exit
772	mov	x0, x25
773	mov	x1, sp
774	bl	do_fpsimd_acc
775	b	ret_to_user
776el0_sve_acc:
777	/*
778	 * Scalable Vector Extension access
779	 */
780	enable_daif
781	ct_user_exit
782	mov	x0, x25
783	mov	x1, sp
784	bl	do_sve_acc
785	b	ret_to_user
786el0_fpsimd_exc:
787	/*
788	 * Floating Point, Advanced SIMD or SVE exception
789	 */
790	enable_daif
791	ct_user_exit
792	mov	x0, x25
793	mov	x1, sp
794	bl	do_fpsimd_exc
795	b	ret_to_user
796el0_sp_pc:
797	/*
798	 * Stack or PC alignment exception handling
799	 */
800	mrs	x26, far_el1
801	enable_da_f
802#ifdef CONFIG_TRACE_IRQFLAGS
803	bl	trace_hardirqs_off
804#endif
805	ct_user_exit
806	mov	x0, x26
807	mov	x1, x25
808	mov	x2, sp
809	bl	do_sp_pc_abort
810	b	ret_to_user
811el0_undef:
812	/*
813	 * Undefined instruction
814	 */
815	enable_daif
816	ct_user_exit
817	mov	x0, sp
818	bl	do_undefinstr
819	b	ret_to_user
820el0_sys:
821	/*
822	 * System instructions, for trapped cache maintenance instructions
823	 */
824	enable_daif
825	ct_user_exit
826	mov	x0, x25
827	mov	x1, sp
828	bl	do_sysinstr
829	b	ret_to_user
830el0_dbg:
831	/*
832	 * Debug exception handling
833	 */
834	tbnz	x24, #0, el0_inv		// EL0 only
835	mrs	x0, far_el1
836	mov	x1, x25
837	mov	x2, sp
838	bl	do_debug_exception
839	enable_daif
840	ct_user_exit
841	b	ret_to_user
842el0_inv:
843	enable_daif
844	ct_user_exit
845	mov	x0, sp
846	mov	x1, #BAD_SYNC
847	mov	x2, x25
848	bl	bad_el0_sync
849	b	ret_to_user
850ENDPROC(el0_sync)
851
852	.align	6
853el0_irq:
854	kernel_entry 0
855el0_irq_naked:
856	enable_da_f
857#ifdef CONFIG_TRACE_IRQFLAGS
858	bl	trace_hardirqs_off
859#endif
860
861	ct_user_exit
862#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
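	/*
	 * If the interrupted PC (x22, the saved ELR_EL1) has bit 55 set it is
	 * a kernel-half address, which is suspicious for an IRQ taken from
	 * EL0: apply branch predictor hardening before going any further.
	 */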
863	tbz	x22, #55, 1f
864	bl	do_el0_irq_bp_hardening
8651:
866#endif
867	irq_handler
868
869#ifdef CONFIG_TRACE_IRQFLAGS
870	bl	trace_hardirqs_on
871#endif
872	b	ret_to_user
873ENDPROC(el0_irq)
874
875el1_error:
876	kernel_entry 1
877	mrs	x1, esr_el1
878	enable_dbg
879	mov	x0, sp
880	bl	do_serror
881	kernel_exit 1
882ENDPROC(el1_error)
883
884el0_error:
885	kernel_entry 0
886el0_error_naked:
887	mrs	x1, esr_el1
888	enable_dbg
889	mov	x0, sp
890	bl	do_serror
891	enable_daif
892	ct_user_exit
893	b	ret_to_user
894ENDPROC(el0_error)
895
896/*
897 * Ok, we need to do extra processing, enter the slow path.
898 */
899work_pending:
900	mov	x0, sp				// 'regs'
901	bl	do_notify_resume
902#ifdef CONFIG_TRACE_IRQFLAGS
903	bl	trace_hardirqs_on		// enabled while in userspace
904#endif
905	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
906	b	finish_ret_to_user
907/*
908 * "slow" syscall return path.
909 */
910ret_to_user:
911	disable_daif
912	ldr	x1, [tsk, #TSK_TI_FLAGS]
913	and	x2, x1, #_TIF_WORK_MASK
914	cbnz	x2, work_pending
915finish_ret_to_user:
916	enable_step_tsk x1, x2
917#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
918	bl	stackleak_erase
919#endif
920	kernel_exit 0
921ENDPROC(ret_to_user)
922
923/*
924 * SVC handler.
925 */
926	.align	6
927el0_svc:
928	mov	x0, sp
929	bl	el0_svc_handler
930	b	ret_to_user
931ENDPROC(el0_svc)
932
933	.popsection				// .entry.text
934
935#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
936/*
937 * Exception vectors trampoline.
938 */
939	.pushsection ".entry.tramp.text", "ax"
940
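	/*
	 * Switch ttbr1_el1 from the trampoline page table to the full kernel
	 * page table, which is laid out PAGE_SIZE + RESERVED_TTBR0_SIZE beyond
	 * it, and clear USER_ASID_FLAG to select the kernel ASID.
	 */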
941	.macro tramp_map_kernel, tmp
942	mrs	\tmp, ttbr1_el1
943	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
944	bic	\tmp, \tmp, #USER_ASID_FLAG
945	msr	ttbr1_el1, \tmp
946#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
947alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
948	/* ASID already in \tmp[63:48] */
949	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
950	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
951	/* 2MB boundary containing the vectors, so we nobble the walk cache */
952	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
953	isb
954	tlbi	vae1, \tmp
955	dsb	nsh
956alternative_else_nop_endif
957#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
958	.endm
959
960	.macro tramp_unmap_kernel, tmp
961	mrs	\tmp, ttbr1_el1
962	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
963	orr	\tmp, \tmp, #USER_ASID_FLAG
964	msr	ttbr1_el1, \tmp
965	/*
966	 * We avoid running the post_ttbr_update_workaround here because
967	 * it's only needed by Cavium ThunderX, which requires KPTI to be
968	 * disabled.
969	 */
970	.endm
971
972	.macro tramp_ventry, regsize = 64
973	.align	7
9741:
975	.if	\regsize == 64
976	msr	tpidrro_el0, x30	// Restored in kernel_ventry
977	.endif
978	/*
979	 * Defend against branch aliasing attacks by pushing a dummy
980	 * entry onto the return stack and using a RET instruction to
981	 * enter the full-fat kernel vectors.
982	 */
983	bl	2f
984	b	.
9852:
986	tramp_map_kernel	x30
987#ifdef CONFIG_RANDOMIZE_BASE
988	adr	x30, tramp_vectors + PAGE_SIZE
989alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
990	ldr	x30, [x30]
991#else
992	ldr	x30, =vectors
993#endif
994	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
995	msr	vbar_el1, x30
996	add	x30, x30, #(1b - tramp_vectors)
997	isb
998	ret
999	.endm
1000
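	/*
	 * Return to EL0 via the trampoline: point VBAR_EL1 back at the
	 * trampoline vectors, unmap the kernel, and (for 64-bit tasks) recover
	 * x30 from FAR_EL1, where kernel_exit stashed it.
	 */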
1001	.macro tramp_exit, regsize = 64
1002	adr	x30, tramp_vectors
1003	msr	vbar_el1, x30
1004	tramp_unmap_kernel	x30
1005	.if	\regsize == 64
1006	mrs	x30, far_el1
1007	.endif
1008	eret
1009	.endm
1010
1011	.align	11
1012ENTRY(tramp_vectors)
1013	.space	0x400
1014
1015	tramp_ventry
1016	tramp_ventry
1017	tramp_ventry
1018	tramp_ventry
1019
1020	tramp_ventry	32
1021	tramp_ventry	32
1022	tramp_ventry	32
1023	tramp_ventry	32
1024END(tramp_vectors)
1025
1026ENTRY(tramp_exit_native)
1027	tramp_exit
1028END(tramp_exit_native)
1029
1030ENTRY(tramp_exit_compat)
1031	tramp_exit	32
1032END(tramp_exit_compat)
1033
1034	.ltorg
1035	.popsection				// .entry.tramp.text
1036#ifdef CONFIG_RANDOMIZE_BASE
1037	.pushsection ".rodata", "a"
1038	.align PAGE_SHIFT
1039	.globl	__entry_tramp_data_start
1040__entry_tramp_data_start:
1041	.quad	vectors
1042	.popsection				// .rodata
1043#endif /* CONFIG_RANDOMIZE_BASE */
1044#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1045
1046/*
1047 * Register switch for AArch64. The callee-saved registers need to be saved
1048 * and restored. On entry:
1049 *   x0 = previous task_struct (must be preserved across the switch)
1050 *   x1 = next task_struct
1051 * Previous and next are guaranteed not to be the same.
1052 *
1053 */
1054ENTRY(cpu_switch_to)
1055	mov	x10, #THREAD_CPU_CONTEXT
1056	add	x8, x0, x10
1057	mov	x9, sp
1058	stp	x19, x20, [x8], #16		// store callee-saved registers
1059	stp	x21, x22, [x8], #16
1060	stp	x23, x24, [x8], #16
1061	stp	x25, x26, [x8], #16
1062	stp	x27, x28, [x8], #16
1063	stp	x29, x9, [x8], #16
1064	str	lr, [x8]
1065	add	x8, x1, x10
1066	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1067	ldp	x21, x22, [x8], #16
1068	ldp	x23, x24, [x8], #16
1069	ldp	x25, x26, [x8], #16
1070	ldp	x27, x28, [x8], #16
1071	ldp	x29, x9, [x8], #16
1072	ldr	lr, [x8]
1073	mov	sp, x9
1074	msr	sp_el0, x1
1075	ret
1076ENDPROC(cpu_switch_to)
1077NOKPROBE(cpu_switch_to)
1078
1079/*
1080 * This is how we return from a fork.
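 *
 * x19 holds the entry point of a kernel thread (zero for a user task) and
 * x20 its argument, as set up by copy_thread().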
1081 */
1082ENTRY(ret_from_fork)
1083	bl	schedule_tail
1084	cbz	x19, 1f				// not a kernel thread
1085	mov	x0, x20
1086	blr	x19
10871:	get_thread_info tsk
1088	b	ret_to_user
1089ENDPROC(ret_from_fork)
1090NOKPROBE(ret_from_fork)
1091
1092#ifdef CONFIG_ARM_SDE_INTERFACE
1093
1094#include <asm/sdei.h>
1095#include <uapi/linux/arm_sdei.h>
1096
1097.macro sdei_handler_exit exit_mode
1098	/* On success, this call never returns... */
1099	cmp	\exit_mode, #SDEI_EXIT_SMC
1100	b.ne	99f
1101	smc	#0
1102	b	.
110399:	hvc	#0
1104	b	.
1105.endm
1106
1107#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1108/*
1109 * The regular SDEI entry point may have been unmapped along with the rest of
1110 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1111 * argument accessible.
1112 *
1113 * This clobbers x4; __sdei_handler() will restore it from firmware's
1114 * copy.
1115 */
1116.ltorg
1117.pushsection ".entry.tramp.text", "ax"
1118ENTRY(__sdei_asm_entry_trampoline)
1119	mrs	x4, ttbr1_el1
1120	tbz	x4, #USER_ASID_BIT, 1f
1121
1122	tramp_map_kernel tmp=x4
1123	isb
1124	mov	x4, xzr
1125
1126	/*
1127	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1128	 * the kernel on exit.
1129	 */
11301:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1131
1132#ifdef CONFIG_RANDOMIZE_BASE
1133	adr	x4, tramp_vectors + PAGE_SIZE
1134	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1135	ldr	x4, [x4]
1136#else
1137	ldr	x4, =__sdei_asm_handler
1138#endif
1139	br	x4
1140ENDPROC(__sdei_asm_entry_trampoline)
1141NOKPROBE(__sdei_asm_entry_trampoline)
1142
1143/*
1144 * Make the exit call and restore the original ttbr1_el1
1145 *
1146 * x0 & x1: setup for the exit API call
1147 * x2: exit_mode
1148 * x4: struct sdei_registered_event argument from registration time.
1149 */
1150ENTRY(__sdei_asm_exit_trampoline)
1151	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1152	cbnz	x4, 1f
1153
1154	tramp_unmap_kernel	tmp=x4
1155
11561:	sdei_handler_exit exit_mode=x2
1157ENDPROC(__sdei_asm_exit_trampoline)
1158NOKPROBE(__sdei_asm_exit_trampoline)
1159	.ltorg
1160.popsection		// .entry.tramp.text
1161#ifdef CONFIG_RANDOMIZE_BASE
1162.pushsection ".rodata", "a"
1163__sdei_asm_trampoline_next_handler:
1164	.quad	__sdei_asm_handler
1165.popsection		// .rodata
1166#endif /* CONFIG_RANDOMIZE_BASE */
1167#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1168
1169/*
1170 * Software Delegated Exception entry point.
1171 *
1172 * x0: Event number
1173 * x1: struct sdei_registered_event argument from registration time.
1174 * x2: interrupted PC
1175 * x3: interrupted PSTATE
1176 * x4: maybe clobbered by the trampoline
1177 *
1178 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1179 * follow SMCCC. We save (or retrieve) all the registers as the handler may
1180 * want them.
1181 */
1182ENTRY(__sdei_asm_handler)
1183	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1184	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1185	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1186	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1187	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1188	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1189	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1190	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1191	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1192	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1193	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1194	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1195	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1196	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1197	mov	x4, sp
1198	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1199
1200	mov	x19, x1
1201
1202#ifdef CONFIG_VMAP_STACK
1203	/*
1204	 * entry.S may have been using sp as a scratch register; find whether
1205	 * this is a normal or critical event and switch to the appropriate
1206	 * stack for this CPU.
1207	 */
1208	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1209	cbnz	w4, 1f
1210	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1211	b	2f
12121:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12132:	mov	x6, #SDEI_STACK_SIZE
1214	add	x5, x5, x6
1215	mov	sp, x5
1216#endif
1217
1218	/*
1219	 * We may have interrupted userspace, or a guest, or exit-from or
1220	 * return-to either of these. We can't trust sp_el0; restore it.
1221	 */
1222	mrs	x28, sp_el0
1223	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1224	msr	sp_el0, x0
1225
1226	/* If we interrupted the kernel, point to the previous stack/frame. */
1227	and     x0, x3, #0xc
1228	mrs     x1, CurrentEL
1229	cmp     x0, x1
1230	csel	x29, x29, xzr, eq	// fp, or zero
1231	csel	x4, x2, xzr, eq		// elr, or zero
1232
1233	stp	x29, x4, [sp, #-16]!
1234	mov	x29, sp
1235
1236	add	x0, x19, #SDEI_EVENT_INTREGS
1237	mov	x1, x19
1238	bl	__sdei_handler
1239
1240	msr	sp_el0, x28
1241	/* restore regs >x17 that we clobbered */
1242	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1243	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1244	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1245	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1246	mov	sp, x1
1247
1248	mov	x1, x0			// address to complete_and_resume
1249	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1250	cmp	x0, #1
1251	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1252	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1253	csel	x0, x2, x3, ls
1254
1255	ldr_l	x2, sdei_exit_mode
1256
1257alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1258	sdei_handler_exit exit_mode=x2
1259alternative_else_nop_endif
1260
1261#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1262	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1263	br	x5
1264#endif
1265ENDPROC(__sdei_asm_handler)
1266NOKPROBE(__sdei_asm_handler)
1267#endif /* CONFIG_ARM_SDE_INTERFACE */
1268