xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision ccb01374)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

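/*
 * Zero the general purpose registers. Used on entry from EL0, after the live
 * values have been saved to pt_regs, so that the kernel does not run with
 * stale, user-controlled register contents still live.
 */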
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm

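	// Compute the address of \sym within the trampoline alias mapping
	// (TRAMP_VALIAS), so it can be branched to while the rest of the
	// kernel is unmapped from the user page tables.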
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
	.macro	apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, x22, x23

	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	sb
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	kernel_exit 1
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	x24, 1f				// preempt count != 0
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
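// Called from el1_irq, with IRQs still masked, when the preempt count is
// zero; keep calling preempt_schedule_irq() until the resumed task no longer
// needs rescheduling.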
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	mov	x0, sp
	bl	el0_svc_compat_handler
	b	ret_to_user

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked

el0_cp15:
	/*
	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_cp15instr
	b	ret_to_user
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	enable_daif
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_daif
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_daif
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_daif
	ct_user_exit
	b	ret_to_user
ENDPROC(el0_error)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	mov	x0, sp
	bl	el0_svc_handler
	b	ret_to_user
ENDPROC(el0_svc)

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

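	// Switch TTBR1_EL1 from the trampoline page tables to the full kernel
	// (swapper) page tables and clear the user ASID bit; the fixed offset
	// matches the distance between tramp_pg_dir and swapper_pg_dir in the
	// kernel linker script.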
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

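	// EL0 vector stub, executed while the kernel proper is still unmapped:
	// stash x30, map the kernel, install the real VBAR_EL1, then "return"
	// into the matching entry of the full vector table.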
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

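	// Return-to-EL0 path: point VBAR_EL1 back at the trampoline vectors,
	// unmap the kernel, then ERET. For native (64-bit) tasks, x30 was
	// stashed in FAR_EL1 by kernel_exit and is recovered here.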
	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	sb
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
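// For a kernel thread, copy_thread() leaves the thread function in x19 and
// its argument in x20; for a user task x19 is zero and we fall straight
// through to ret_to_user.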
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.ltorg
.popsection		// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
__sdei_asm_trampoline_next_handler:
	.quad	__sdei_asm_handler
.popsection		// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
ENTRY(__sdei_asm_handler)
	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
	and     x0, x3, #0xc
	mrs     x1, CurrentEL
	cmp     x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */