xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 62e59c4e)
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/arm-smccc.h>
22#include <linux/init.h>
23#include <linux/linkage.h>
24
25#include <asm/alternative.h>
26#include <asm/assembler.h>
27#include <asm/asm-offsets.h>
28#include <asm/cpufeature.h>
29#include <asm/errno.h>
30#include <asm/esr.h>
31#include <asm/irq.h>
32#include <asm/memory.h>
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/ptrace.h>
36#include <asm/thread_info.h>
37#include <asm/asm-uaccess.h>
38#include <asm/unistd.h>
39
40/*
41 * Context tracking subsystem.  Used to instrument transitions
42 * between user and kernel mode.
43 */
44	.macro ct_user_exit
45#ifdef CONFIG_CONTEXT_TRACKING
46	bl	context_tracking_user_exit
47#endif
48	.endm
49
50	.macro ct_user_enter
51#ifdef CONFIG_CONTEXT_TRACKING
52	bl	context_tracking_user_enter
53#endif
54	.endm
55
56	.macro	clear_gp_regs
57	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
58	mov	x\n, xzr
59	.endr
60	.endm
61
62/*
63 * Bad Abort numbers
64 *-----------------
65 */
66#define BAD_SYNC	0
67#define BAD_IRQ		1
68#define BAD_FIQ		2
69#define BAD_ERROR	3
70
71	.macro kernel_ventry, el, label, regsize = 64
72	.align 7
73#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
74alternative_if ARM64_UNMAP_KERNEL_AT_EL0
75	.if	\el == 0
76	.if	\regsize == 64
77	mrs	x30, tpidrro_el0
78	msr	tpidrro_el0, xzr
79	.else
80	mov	x30, xzr
81	.endif
82	.endif
83alternative_else_nop_endif
84#endif
85
86	sub	sp, sp, #S_FRAME_SIZE
87#ifdef CONFIG_VMAP_STACK
88	/*
89	 * Test whether the SP has overflowed, without corrupting a GPR.
90	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT) should always be zero.
91	 */
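	/*
	 * Worked example, assuming 16K kernel stacks (THREAD_SHIFT == 14):
	 * stacks are allocated with 2 * THREAD_SIZE alignment, so any SP
	 * inside a healthy stack has bit 14 clear, while an SP that has run
	 * off the bottom of the stack has bit 14 set. The add/sub pair below
	 * recovers the decremented SP into x0 without needing a scratch
	 * register, the tbnz tests that single bit, and the following two
	 * subs undo the shuffle, restoring x0 and leaving SP pointing at the
	 * pt_regs frame reserved above.
	 */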
92	add	sp, sp, x0			// sp' = sp + x0
93	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
94	tbnz	x0, #THREAD_SHIFT, 0f
95	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
96	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
97	b	el\()\el\()_\label
98
990:
100	/*
101	 * Either we've just detected an overflow, or we've taken an exception
102	 * while on the overflow stack. Either way, we won't return to
103	 * userspace, and can clobber EL0 registers to free up GPRs.
104	 */
105
106	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
107	msr	tpidr_el0, x0
108
109	/* Recover the original x0 value and stash it in tpidrro_el0 */
110	sub	x0, sp, x0
111	msr	tpidrro_el0, x0
112
113	/* Switch to the overflow stack */
114	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
115
116	/*
117	 * Check whether we were already on the overflow stack. This may happen
118	 * after panic() re-enables interrupts.
119	 */
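	/*
	 * SP currently sits at the top of this CPU's overflow stack. If the
	 * interrupted SP lies within OVERFLOW_STACK_SIZE below it, the
	 * masked delta is zero: we were already on the overflow stack and
	 * simply carry on. Any other value means a freshly detected
	 * overflow, reported via __bad_stack.
	 */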
120	mrs	x0, tpidr_el0			// sp of interrupted context
121	sub	x0, sp, x0			// delta with top of overflow stack
122	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
123	b.ne	__bad_stack			// no? -> bad stack pointer
124
125	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
126	sub	sp, sp, x0
127	mrs	x0, tpidrro_el0
128#endif
129	b	el\()\el\()_\label
130	.endm
131
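	/*
	 * tramp_alias computes the trampoline alias of a symbol in
	 * .entry.tramp.text: TRAMP_VALIAS is the fixmap alias of that
	 * section, so adding the symbol's offset within the section yields
	 * an address that stays mapped even when running on the user
	 * (trampoline) page tables.
	 */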
132	.macro tramp_alias, dst, sym
133	mov_q	\dst, TRAMP_VALIAS
134	add	\dst, \dst, #(\sym - .entry.tramp.text)
135	.endm
136
137	// This macro corrupts x0-x3. It is the caller's duty
138	// to save/restore them if required.
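	//
	// Rough flow: the leading branch is patched to a NOP by
	// arm64_enable_wa2_handling when dynamic mitigation is in use; we
	// then skip if this CPU does not require the firmware callback, or
	// if the task has TIF_SSBD set (its mitigation is left permanently
	// enabled, so there is nothing to toggle). Otherwise
	// ARM_SMCCC_ARCH_WORKAROUND_2 is issued with \state through the
	// SMC/HVC conduit patched in over the nop.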
139	.macro	apply_ssbd, state, tmp1, tmp2
140#ifdef CONFIG_ARM64_SSBD
141alternative_cb	arm64_enable_wa2_handling
142	b	.L__asm_ssbd_skip\@
143alternative_cb_end
144	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
145	cbz	\tmp2,	.L__asm_ssbd_skip\@
146	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
147	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
148	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
149	mov	w1, #\state
150alternative_cb	arm64_update_smccc_conduit
151	nop					// Patched to SMC/HVC #0
152alternative_cb_end
153.L__asm_ssbd_skip\@:
154#endif
155	.endm
156
157	.macro	kernel_entry, el, regsize = 64
158	.if	\regsize == 32
159	mov	w0, w0				// zero upper 32 bits of x0
160	.endif
161	stp	x0, x1, [sp, #16 * 0]
162	stp	x2, x3, [sp, #16 * 1]
163	stp	x4, x5, [sp, #16 * 2]
164	stp	x6, x7, [sp, #16 * 3]
165	stp	x8, x9, [sp, #16 * 4]
166	stp	x10, x11, [sp, #16 * 5]
167	stp	x12, x13, [sp, #16 * 6]
168	stp	x14, x15, [sp, #16 * 7]
169	stp	x16, x17, [sp, #16 * 8]
170	stp	x18, x19, [sp, #16 * 9]
171	stp	x20, x21, [sp, #16 * 10]
172	stp	x22, x23, [sp, #16 * 11]
173	stp	x24, x25, [sp, #16 * 12]
174	stp	x26, x27, [sp, #16 * 13]
175	stp	x28, x29, [sp, #16 * 14]
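	/*
	 * The stores above populate pt_regs->regs[0..29]; SP itself points
	 * at the struct pt_regs whose space was reserved in kernel_ventry.
	 * lr/x30, the aborted SP, ELR and SPSR are saved further down once
	 * x21-x23 have been set up.
	 */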
176
177	.if	\el == 0
178	clear_gp_regs
179	mrs	x21, sp_el0
180	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
181	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
182	disable_step_tsk x19, x20		// exceptions when scheduling.
183
184	apply_ssbd 1, x22, x23
185
186	.else
187	add	x21, sp, #S_FRAME_SIZE
188	get_current_task tsk
189	/* Save the task's original addr_limit and set USER_DS */
190	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
191	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
192	mov	x20, #USER_DS
193	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
194	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
195	.endif /* \el == 0 */
196	mrs	x22, elr_el1
197	mrs	x23, spsr_el1
198	stp	lr, x21, [sp, #S_LR]
199
200	/*
201	 * In order to be able to dump the contents of struct pt_regs at the
202	 * time the exception was taken (in case we attempt to walk the call
203	 * stack later), chain it together with the stack frames.
204	 */
205	.if \el == 0
206	stp	xzr, xzr, [sp, #S_STACKFRAME]
207	.else
208	stp	x29, x22, [sp, #S_STACKFRAME]
209	.endif
210	add	x29, sp, #S_STACKFRAME
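	/*
	 * x29 now points at a synthetic frame record inside pt_regs:
	 * NULL-terminated for exceptions from EL0, or {interrupted x29,
	 * ELR} for exceptions from EL1, so the unwinder can walk straight
	 * through the exception boundary.
	 */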
211
212#ifdef CONFIG_ARM64_SW_TTBR0_PAN
213	/*
214	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
215	 * EL0, there is no need to check the state of TTBR0_EL1 since
216	 * accesses are always enabled.
217	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
218	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
219	 * user mappings.
220	 */
221alternative_if ARM64_HAS_PAN
222	b	1f				// skip TTBR0 PAN
223alternative_else_nop_endif
224
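	/*
	 * For exceptions from EL1, look at the ASID in TTBR0_EL1: a zero
	 * ASID means the reserved table is already installed (uaccess
	 * disabled), so the emulated PAN bit stays set in the saved SPSR;
	 * otherwise the bit is cleared so that kernel_exit knows to
	 * re-enable TTBR0 access on return.
	 */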
225	.if	\el != 0
226	mrs	x21, ttbr0_el1
227	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
228	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
229	b.eq	1f				// TTBR0 access already disabled
230	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
231	.endif
232
233	__uaccess_ttbr0_disable x21
2341:
235#endif
236
237	stp	x22, x23, [sp, #S_PC]
238
239	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
240	.if	\el == 0
241	mov	w21, #NO_SYSCALL
242	str	w21, [sp, #S_SYSCALLNO]
243	.endif
244
245	/*
246	 * Set sp_el0 to current thread_info.
247	 */
248	.if	\el == 0
249	msr	sp_el0, tsk
250	.endif
251
252	/* Save pmr */
253alternative_if ARM64_HAS_IRQ_PRIO_MASKING
254	mrs_s	x20, SYS_ICC_PMR_EL1
255	str	x20, [sp, #S_PMR_SAVE]
256alternative_else_nop_endif
257
258	/*
259	 * Registers that may be useful after this macro is invoked:
260	 *
261	 * x21 - aborted SP
262	 * x22 - aborted PC
263	 * x23 - aborted PSTATE
264	 */
265	.endm
266
267	.macro	kernel_exit, el
268	.if	\el != 0
269	disable_daif
270
271	/* Restore the task's original addr_limit. */
272	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
273	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
274
275	/* No need to restore UAO, it will be restored from SPSR_EL1 */
276	.endif
277
278	/* Restore pmr */
279alternative_if ARM64_HAS_IRQ_PRIO_MASKING
280	ldr	x20, [sp, #S_PMR_SAVE]
281	msr_s	SYS_ICC_PMR_EL1, x20
282	/* Ensure priority change is seen by redistributor */
283	dsb	sy
284alternative_else_nop_endif
285
286	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
287	.if	\el == 0
288	ct_user_enter
289	.endif
290
291#ifdef CONFIG_ARM64_SW_TTBR0_PAN
292	/*
293	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
294	 * PAN bit checking.
295	 */
296alternative_if ARM64_HAS_PAN
297	b	2f				// skip TTBR0 PAN
298alternative_else_nop_endif
299
300	.if	\el != 0
301	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
302	.endif
303
304	__uaccess_ttbr0_enable x0, x1
305
306	.if	\el == 0
307	/*
308	 * Enable errata workarounds only if returning to user. The only
309	 * workaround currently required for TTBR0_EL1 changes is for the
310	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
311	 * corruption).
312	 */
313	bl	post_ttbr_update_workaround
314	.endif
3151:
316	.if	\el != 0
317	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
318	.endif
3192:
320#endif
321
322	.if	\el == 0
323	ldr	x23, [sp, #S_SP]		// load return stack pointer
324	msr	sp_el0, x23
325	tst	x22, #PSR_MODE32_BIT		// native task?
326	b.eq	3f
327
328#ifdef CONFIG_ARM64_ERRATUM_845719
329alternative_if ARM64_WORKAROUND_845719
330#ifdef CONFIG_PID_IN_CONTEXTIDR
331	mrs	x29, contextidr_el1
332	msr	contextidr_el1, x29
333#else
334	msr contextidr_el1, xzr
335#endif
336alternative_else_nop_endif
337#endif
3383:
339	apply_ssbd 0, x0, x1
340	.endif
341
342	msr	elr_el1, x21			// set up the return data
343	msr	spsr_el1, x22
344	ldp	x0, x1, [sp, #16 * 0]
345	ldp	x2, x3, [sp, #16 * 1]
346	ldp	x4, x5, [sp, #16 * 2]
347	ldp	x6, x7, [sp, #16 * 3]
348	ldp	x8, x9, [sp, #16 * 4]
349	ldp	x10, x11, [sp, #16 * 5]
350	ldp	x12, x13, [sp, #16 * 6]
351	ldp	x14, x15, [sp, #16 * 7]
352	ldp	x16, x17, [sp, #16 * 8]
353	ldp	x18, x19, [sp, #16 * 9]
354	ldp	x20, x21, [sp, #16 * 10]
355	ldp	x22, x23, [sp, #16 * 11]
356	ldp	x24, x25, [sp, #16 * 12]
357	ldp	x26, x27, [sp, #16 * 13]
358	ldp	x28, x29, [sp, #16 * 14]
359	ldr	lr, [sp, #S_LR]
360	add	sp, sp, #S_FRAME_SIZE		// restore sp
361
362	.if	\el == 0
363alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
364#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
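	/*
	 * The bne below reuses the NZCV flags from the
	 * "tst x22, #PSR_MODE32_BIT" further up; none of the intervening
	 * loads, moves and the non-flag-setting add write the flags, so the
	 * branch still distinguishes a compat return from a native one.
	 */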
365	bne	4f
366	msr	far_el1, x30
367	tramp_alias	x30, tramp_exit_native
368	br	x30
3694:
370	tramp_alias	x30, tramp_exit_compat
371	br	x30
372#endif
373	.else
374	eret
375	.endif
376	sb
377	.endm
378
379	.macro	irq_stack_entry
380	mov	x19, sp			// preserve the original sp
381
382	/*
383	 * Compare sp with the base of the task stack.
384	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
385	 * and should switch to the irq stack.
386	 */
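	/*
	 * The eor/and pair compares the two addresses at THREAD_SIZE
	 * granularity using a single scratch register: a non-zero result
	 * means SP is not on the task stack (e.g. a nested exception that
	 * is already running on the irq stack), in which case SP is left
	 * untouched.
	 */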
387	ldr	x25, [tsk, TSK_STACK]
388	eor	x25, x25, x19
389	and	x25, x25, #~(THREAD_SIZE - 1)
390	cbnz	x25, 9998f
391
392	ldr_this_cpu x25, irq_stack_ptr, x26
393	mov	x26, #IRQ_STACK_SIZE
394	add	x26, x25, x26
395
396	/* switch to the irq stack */
397	mov	sp, x26
3989998:
399	.endm
400
401	/*
402	 * x19 should be preserved between irq_stack_entry and
403	 * irq_stack_exit.
404	 */
405	.macro	irq_stack_exit
406	mov	sp, x19
407	.endm
408
409/* GPRs used by entry code */
410tsk	.req	x28		// current thread_info
411
412/*
413 * Interrupt handling.
414 */
415	.macro	irq_handler
416	ldr_l	x1, handle_arch_irq
417	mov	x0, sp
418	irq_stack_entry
419	blr	x1
420	irq_stack_exit
421	.endm
422
423	.text
424
425/*
426 * Exception vectors.
427 */
428	.pushsection ".entry.text", "ax"
429
430	.align	11
431ENTRY(vectors)
432	kernel_ventry	1, sync_invalid			// Synchronous EL1t
433	kernel_ventry	1, irq_invalid			// IRQ EL1t
434	kernel_ventry	1, fiq_invalid			// FIQ EL1t
435	kernel_ventry	1, error_invalid		// Error EL1t
436
437	kernel_ventry	1, sync				// Synchronous EL1h
438	kernel_ventry	1, irq				// IRQ EL1h
439	kernel_ventry	1, fiq_invalid			// FIQ EL1h
440	kernel_ventry	1, error			// Error EL1h
441
442	kernel_ventry	0, sync				// Synchronous 64-bit EL0
443	kernel_ventry	0, irq				// IRQ 64-bit EL0
444	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
445	kernel_ventry	0, error			// Error 64-bit EL0
446
447#ifdef CONFIG_COMPAT
448	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
449	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
450	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
451	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
452#else
453	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
454	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
455	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
456	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
457#endif
458END(vectors)
459
460#ifdef CONFIG_VMAP_STACK
461	/*
462	 * We detected an overflow in kernel_ventry, which switched to the
463	 * overflow stack. Stash the exception regs, and head to our overflow
464	 * handler.
465	 */
466__bad_stack:
467	/* Restore the original x0 value */
468	mrs	x0, tpidrro_el0
469
470	/*
471	 * Store the original GPRs to the new stack. The original SP (minus
472	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
473	 */
474	sub	sp, sp, #S_FRAME_SIZE
475	kernel_entry 1
476	mrs	x0, tpidr_el0
477	add	x0, x0, #S_FRAME_SIZE
478	str	x0, [sp, #S_SP]
479
480	/* Stash the regs for handle_bad_stack */
481	mov	x0, sp
482
483	/* Time to die */
484	bl	handle_bad_stack
485	ASM_BUG()
486#endif /* CONFIG_VMAP_STACK */
487
488/*
489 * Invalid mode handlers
490 */
491	.macro	inv_entry, el, reason, regsize = 64
492	kernel_entry \el, \regsize
493	mov	x0, sp
494	mov	x1, #\reason
495	mrs	x2, esr_el1
496	bl	bad_mode
497	ASM_BUG()
498	.endm
499
500el0_sync_invalid:
501	inv_entry 0, BAD_SYNC
502ENDPROC(el0_sync_invalid)
503
504el0_irq_invalid:
505	inv_entry 0, BAD_IRQ
506ENDPROC(el0_irq_invalid)
507
508el0_fiq_invalid:
509	inv_entry 0, BAD_FIQ
510ENDPROC(el0_fiq_invalid)
511
512el0_error_invalid:
513	inv_entry 0, BAD_ERROR
514ENDPROC(el0_error_invalid)
515
516#ifdef CONFIG_COMPAT
517el0_fiq_invalid_compat:
518	inv_entry 0, BAD_FIQ, 32
519ENDPROC(el0_fiq_invalid_compat)
520#endif
521
522el1_sync_invalid:
523	inv_entry 1, BAD_SYNC
524ENDPROC(el1_sync_invalid)
525
526el1_irq_invalid:
527	inv_entry 1, BAD_IRQ
528ENDPROC(el1_irq_invalid)
529
530el1_fiq_invalid:
531	inv_entry 1, BAD_FIQ
532ENDPROC(el1_fiq_invalid)
533
534el1_error_invalid:
535	inv_entry 1, BAD_ERROR
536ENDPROC(el1_error_invalid)
537
538/*
539 * EL1 mode handlers.
540 */
541	.align	6
542el1_sync:
543	kernel_entry 1
544	mrs	x1, esr_el1			// read the syndrome register
545	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
546	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
547	b.eq	el1_da
548	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
549	b.eq	el1_ia
550	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
551	b.eq	el1_undef
552	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
553	b.eq	el1_sp_pc
554	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
555	b.eq	el1_sp_pc
556	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
557	b.eq	el1_undef
558	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
559	b.ge	el1_dbg
560	b	el1_inv
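	/*
	 * The b.ge above relies on the debug exception classes (breakpoint,
	 * software step, watchpoint, BRK) having the highest ESR_ELx_EC
	 * encodings, so a single comparison against ESR_ELx_EC_BREAKPT_CUR
	 * routes all of them to el1_dbg.
	 */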
561
562el1_ia:
563	/*
564	 * Fall through to the Data abort case
565	 */
566el1_da:
567	/*
568	 * Data abort handling
569	 */
570	mrs	x3, far_el1
571	inherit_daif	pstate=x23, tmp=x2
572	clear_address_tag x0, x3
573	mov	x2, sp				// struct pt_regs
574	bl	do_mem_abort
575
576	kernel_exit 1
577el1_sp_pc:
578	/*
579	 * Stack or PC alignment exception handling
580	 */
581	mrs	x0, far_el1
582	inherit_daif	pstate=x23, tmp=x2
583	mov	x2, sp
584	bl	do_sp_pc_abort
585	ASM_BUG()
586el1_undef:
587	/*
588	 * Undefined instruction
589	 */
590	inherit_daif	pstate=x23, tmp=x2
591	mov	x0, sp
592	bl	do_undefinstr
593	kernel_exit 1
594el1_dbg:
595	/*
596	 * Debug exception handling
597	 */
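	/*
	 * Debug exceptions taken from the current EL have odd EC values
	 * (bit 0 set), with BRK64 as the one exception; the conditional
	 * increment forces its bit 0 on, so a clear bit 0 here means an
	 * encoding we do not expect at EL1 and is handed to el1_inv.
	 */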
598	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
599	cinc	x24, x24, eq			// set bit '0'
600	tbz	x24, #0, el1_inv		// EL1 only
601	mrs	x0, far_el1
602	mov	x2, sp				// struct pt_regs
603	bl	do_debug_exception
604	kernel_exit 1
605el1_inv:
606	// TODO: add support for undefined instructions in kernel mode
607	inherit_daif	pstate=x23, tmp=x2
608	mov	x0, sp
609	mov	x2, x1
610	mov	x1, #BAD_SYNC
611	bl	bad_mode
612	ASM_BUG()
613ENDPROC(el1_sync)
614
615	.align	6
616el1_irq:
617	kernel_entry 1
618	enable_da_f
619#ifdef CONFIG_TRACE_IRQFLAGS
620#ifdef CONFIG_ARM64_PSEUDO_NMI
621alternative_if ARM64_HAS_IRQ_PRIO_MASKING
622	ldr	x20, [sp, #S_PMR_SAVE]
623alternative_else
624	mov	x20, #GIC_PRIO_IRQON
625alternative_endif
626	cmp	x20, #GIC_PRIO_IRQOFF
627	/* Irqs were disabled, don't trace */
628	b.ls	1f
629#endif
630	bl	trace_hardirqs_off
6311:
632#endif
633
634	irq_handler
635
636#ifdef CONFIG_PREEMPT
637	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
638alternative_if ARM64_HAS_IRQ_PRIO_MASKING
639	/*
640	 * DAIF.D, A and F were cleared at the start of handling. If anything
641	 * is still set in DAIF, we came in via an NMI, so skip preemption.
642	 */
643	mrs	x0, daif
644	orr	x24, x24, x0
645alternative_else_nop_endif
646	cbnz	x24, 1f				// preempt count != 0 || NMI return path
647	bl	preempt_schedule_irq		// irq en/disable is done inside
6481:
649#endif
650#ifdef CONFIG_TRACE_IRQFLAGS
651#ifdef CONFIG_ARM64_PSEUDO_NMI
652	/*
653	 * If IRQs were disabled when we received the interrupt, we have an NMI
654	 * and we are not re-enabling interrupts upon eret. Skip tracing.
655	 */
656	cmp	x20, #GIC_PRIO_IRQOFF
657	b.ls	1f
658#endif
659	bl	trace_hardirqs_on
6601:
661#endif
662
663	kernel_exit 1
664ENDPROC(el1_irq)
665
666/*
667 * EL0 mode handlers.
668 */
669	.align	6
670el0_sync:
671	kernel_entry 0
672	mrs	x25, esr_el1			// read the syndrome register
673	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
674	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
675	b.eq	el0_svc
676	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
677	b.eq	el0_da
678	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
679	b.eq	el0_ia
680	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
681	b.eq	el0_fpsimd_acc
682	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
683	b.eq	el0_sve_acc
684	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
685	b.eq	el0_fpsimd_exc
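	/*
	 * SYS64 and WFx traps share a handler: if the cmp below already
	 * matches SYS64, the ccmp condition "ne" fails and NZCV is forced
	 * to #4 (Z set); otherwise the ccmp compares against WFx. Either
	 * way the b.eq is taken for both exception classes.
	 */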
686	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
687	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
688	b.eq	el0_sys
689	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
690	b.eq	el0_sp_pc
691	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
692	b.eq	el0_sp_pc
693	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
694	b.eq	el0_undef
695	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
696	b.ge	el0_dbg
697	b	el0_inv
698
699#ifdef CONFIG_COMPAT
700	.align	6
701el0_sync_compat:
702	kernel_entry 0, 32
703	mrs	x25, esr_el1			// read the syndrome register
704	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
705	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
706	b.eq	el0_svc_compat
707	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
708	b.eq	el0_da
709	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
710	b.eq	el0_ia
711	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
712	b.eq	el0_fpsimd_acc
713	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
714	b.eq	el0_fpsimd_exc
715	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
716	b.eq	el0_sp_pc
717	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
718	b.eq	el0_undef
719	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
720	b.eq	el0_cp15
721	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
722	b.eq	el0_cp15
723	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
724	b.eq	el0_undef
725	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
726	b.eq	el0_undef
727	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
728	b.eq	el0_undef
729	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
730	b.ge	el0_dbg
731	b	el0_inv
732el0_svc_compat:
733	mov	x0, sp
734	bl	el0_svc_compat_handler
735	b	ret_to_user
736
737	.align	6
738el0_irq_compat:
739	kernel_entry 0, 32
740	b	el0_irq_naked
741
742el0_error_compat:
743	kernel_entry 0, 32
744	b	el0_error_naked
745
746el0_cp15:
747	/*
748	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
749	 */
750	enable_daif
751	ct_user_exit
752	mov	x0, x25
753	mov	x1, sp
754	bl	do_cp15instr
755	b	ret_to_user
756#endif
757
758el0_da:
759	/*
760	 * Data abort handling
761	 */
762	mrs	x26, far_el1
763	enable_daif
764	ct_user_exit
765	clear_address_tag x0, x26
766	mov	x1, x25
767	mov	x2, sp
768	bl	do_mem_abort
769	b	ret_to_user
770el0_ia:
771	/*
772	 * Instruction abort handling
773	 */
774	mrs	x26, far_el1
775	enable_da_f
776#ifdef CONFIG_TRACE_IRQFLAGS
777	bl	trace_hardirqs_off
778#endif
779	ct_user_exit
780	mov	x0, x26
781	mov	x1, x25
782	mov	x2, sp
783	bl	do_el0_ia_bp_hardening
784	b	ret_to_user
785el0_fpsimd_acc:
786	/*
787	 * Floating Point or Advanced SIMD access
788	 */
789	enable_daif
790	ct_user_exit
791	mov	x0, x25
792	mov	x1, sp
793	bl	do_fpsimd_acc
794	b	ret_to_user
795el0_sve_acc:
796	/*
797	 * Scalable Vector Extension access
798	 */
799	enable_daif
800	ct_user_exit
801	mov	x0, x25
802	mov	x1, sp
803	bl	do_sve_acc
804	b	ret_to_user
805el0_fpsimd_exc:
806	/*
807	 * Floating Point, Advanced SIMD or SVE exception
808	 */
809	enable_daif
810	ct_user_exit
811	mov	x0, x25
812	mov	x1, sp
813	bl	do_fpsimd_exc
814	b	ret_to_user
815el0_sp_pc:
816	/*
817	 * Stack or PC alignment exception handling
818	 */
819	mrs	x26, far_el1
820	enable_da_f
821#ifdef CONFIG_TRACE_IRQFLAGS
822	bl	trace_hardirqs_off
823#endif
824	ct_user_exit
825	mov	x0, x26
826	mov	x1, x25
827	mov	x2, sp
828	bl	do_sp_pc_abort
829	b	ret_to_user
830el0_undef:
831	/*
832	 * Undefined instruction
833	 */
834	enable_daif
835	ct_user_exit
836	mov	x0, sp
837	bl	do_undefinstr
838	b	ret_to_user
839el0_sys:
840	/*
841	 * System instructions, for trapped cache maintenance instructions
842	 */
843	enable_daif
844	ct_user_exit
845	mov	x0, x25
846	mov	x1, sp
847	bl	do_sysinstr
848	b	ret_to_user
849el0_dbg:
850	/*
851	 * Debug exception handling
852	 */
853	tbnz	x24, #0, el0_inv		// EL0 only
854	mrs	x0, far_el1
855	mov	x1, x25
856	mov	x2, sp
857	bl	do_debug_exception
858	enable_daif
859	ct_user_exit
860	b	ret_to_user
861el0_inv:
862	enable_daif
863	ct_user_exit
864	mov	x0, sp
865	mov	x1, #BAD_SYNC
866	mov	x2, x25
867	bl	bad_el0_sync
868	b	ret_to_user
869ENDPROC(el0_sync)
870
871	.align	6
872el0_irq:
873	kernel_entry 0
874el0_irq_naked:
875	enable_da_f
876#ifdef CONFIG_TRACE_IRQFLAGS
877	bl	trace_hardirqs_off
878#endif
879
880	ct_user_exit
881#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
882	tbz	x22, #55, 1f
883	bl	do_el0_irq_bp_hardening
8841:
885#endif
886	irq_handler
887
888#ifdef CONFIG_TRACE_IRQFLAGS
889	bl	trace_hardirqs_on
890#endif
891	b	ret_to_user
892ENDPROC(el0_irq)
893
894el1_error:
895	kernel_entry 1
896	mrs	x1, esr_el1
897	enable_dbg
898	mov	x0, sp
899	bl	do_serror
900	kernel_exit 1
901ENDPROC(el1_error)
902
903el0_error:
904	kernel_entry 0
905el0_error_naked:
906	mrs	x1, esr_el1
907	enable_dbg
908	mov	x0, sp
909	bl	do_serror
910	enable_daif
911	ct_user_exit
912	b	ret_to_user
913ENDPROC(el0_error)
914
915/*
916 * Ok, we need to do extra processing; enter the slow path.
917 */
918work_pending:
919	mov	x0, sp				// 'regs'
920	bl	do_notify_resume
921#ifdef CONFIG_TRACE_IRQFLAGS
922	bl	trace_hardirqs_on		// enabled while in userspace
923#endif
924	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
925	b	finish_ret_to_user
926/*
927 * "slow" syscall return path.
928 */
929ret_to_user:
930	disable_daif
931	ldr	x1, [tsk, #TSK_TI_FLAGS]
932	and	x2, x1, #_TIF_WORK_MASK
933	cbnz	x2, work_pending
934finish_ret_to_user:
935	enable_step_tsk x1, x2
936#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
937	bl	stackleak_erase
938#endif
939	kernel_exit 0
940ENDPROC(ret_to_user)
941
942/*
943 * SVC handler.
944 */
945	.align	6
946el0_svc:
947	mov	x0, sp
948	bl	el0_svc_handler
949	b	ret_to_user
950ENDPROC(el0_svc)
951
952	.popsection				// .entry.text
953
954#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
955/*
956 * Exception vectors trampoline.
957 */
958	.pushsection ".entry.tramp.text", "ax"
959
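/*
 * tramp_map_kernel/tramp_unmap_kernel switch TTBR1_EL1 between the minimal
 * trampoline page tables and swapper_pg_dir. The fixed offset assumes the
 * linker script places tramp_pg_dir, the (optional) reserved_ttbr0 page and
 * swapper_pg_dir contiguously, and USER_ASID_FLAG selects the odd
 * (userspace) ASID used while the kernel mappings are hidden.
 */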
960	.macro tramp_map_kernel, tmp
961	mrs	\tmp, ttbr1_el1
962	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
963	bic	\tmp, \tmp, #USER_ASID_FLAG
964	msr	ttbr1_el1, \tmp
965#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
966alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
967	/* ASID already in \tmp[63:48] */
968	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
969	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
970	/* 2MB boundary containing the vectors, so we nobble the walk cache */
971	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
972	isb
973	tlbi	vae1, \tmp
974	dsb	nsh
975alternative_else_nop_endif
976#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
977	.endm
978
979	.macro tramp_unmap_kernel, tmp
980	mrs	\tmp, ttbr1_el1
981	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
982	orr	\tmp, \tmp, #USER_ASID_FLAG
983	msr	ttbr1_el1, \tmp
984	/*
985	 * We avoid running the post_ttbr_update_workaround here because
986	 * it's only needed by Cavium ThunderX, which requires KPTI to be
987	 * disabled.
988	 */
989	.endm
990
991	.macro tramp_ventry, regsize = 64
992	.align	7
9931:
994	.if	\regsize == 64
995	msr	tpidrro_el0, x30	// Restored in kernel_ventry
996	.endif
997	/*
998	 * Defend against branch aliasing attacks by pushing a dummy
999	 * entry onto the return stack and using a RET instruction to
1000	 * enter the full-fat kernel vectors.
1001	 */
1002	bl	2f
1003	b	.
10042:
1005	tramp_map_kernel	x30
1006#ifdef CONFIG_RANDOMIZE_BASE
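	/*
	 * With KASLR the vectors address is deliberately not kept in a
	 * literal pool inside the trampoline text, as that page must not
	 * reveal the randomized kernel VA; instead it is loaded from the
	 * __entry_tramp_data_start page mapped immediately after the
	 * trampoline text, which holds a single ".quad vectors".
	 */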
1007	adr	x30, tramp_vectors + PAGE_SIZE
1008alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1009	ldr	x30, [x30]
1010#else
1011	ldr	x30, =vectors
1012#endif
1013	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1014	msr	vbar_el1, x30
1015	add	x30, x30, #(1b - tramp_vectors)
1016	isb
1017	ret
1018	.endm
1019
1020	.macro tramp_exit, regsize = 64
1021	adr	x30, tramp_vectors
1022	msr	vbar_el1, x30
1023	tramp_unmap_kernel	x30
1024	.if	\regsize == 64
1025	mrs	x30, far_el1
1026	.endif
1027	eret
1028	sb
1029	.endm
1030
1031	.align	11
1032ENTRY(tramp_vectors)
1033	.space	0x400
1034
1035	tramp_ventry
1036	tramp_ventry
1037	tramp_ventry
1038	tramp_ventry
1039
1040	tramp_ventry	32
1041	tramp_ventry	32
1042	tramp_ventry	32
1043	tramp_ventry	32
1044END(tramp_vectors)
1045
1046ENTRY(tramp_exit_native)
1047	tramp_exit
1048END(tramp_exit_native)
1049
1050ENTRY(tramp_exit_compat)
1051	tramp_exit	32
1052END(tramp_exit_compat)
1053
1054	.ltorg
1055	.popsection				// .entry.tramp.text
1056#ifdef CONFIG_RANDOMIZE_BASE
1057	.pushsection ".rodata", "a"
1058	.align PAGE_SHIFT
1059	.globl	__entry_tramp_data_start
1060__entry_tramp_data_start:
1061	.quad	vectors
1062	.popsection				// .rodata
1063#endif /* CONFIG_RANDOMIZE_BASE */
1064#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1065
1066/*
1067 * Register switch for AArch64. The callee-saved registers need to be saved
1068 * and restored. On entry:
1069 *   x0 = previous task_struct (must be preserved across the switch)
1070 *   x1 = next task_struct
1071 * Previous and next are guaranteed not to be the same.
1072 *
1073 */
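/*
 * The stores and loads below mirror struct cpu_context (x19-x28, fp, sp,
 * pc) at offset THREAD_CPU_CONTEXT inside task_struct; "pc" is simply the
 * saved lr, so the next task resumes at its caller's return address, with
 * sp_el0 updated to point at its task_struct.
 */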
1074ENTRY(cpu_switch_to)
1075	mov	x10, #THREAD_CPU_CONTEXT
1076	add	x8, x0, x10
1077	mov	x9, sp
1078	stp	x19, x20, [x8], #16		// store callee-saved registers
1079	stp	x21, x22, [x8], #16
1080	stp	x23, x24, [x8], #16
1081	stp	x25, x26, [x8], #16
1082	stp	x27, x28, [x8], #16
1083	stp	x29, x9, [x8], #16
1084	str	lr, [x8]
1085	add	x8, x1, x10
1086	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1087	ldp	x21, x22, [x8], #16
1088	ldp	x23, x24, [x8], #16
1089	ldp	x25, x26, [x8], #16
1090	ldp	x27, x28, [x8], #16
1091	ldp	x29, x9, [x8], #16
1092	ldr	lr, [x8]
1093	mov	sp, x9
1094	msr	sp_el0, x1
1095	ret
1096ENDPROC(cpu_switch_to)
1097NOKPROBE(cpu_switch_to)
1098
1099/*
1100 * This is how we return from a fork.
1101 */
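/*
 * x19/x20 are set up by copy_thread(): for a kernel thread x19 holds the
 * thread function and x20 its argument, while for a user task x19 is zero
 * and we fall straight through to ret_to_user.
 */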
1102ENTRY(ret_from_fork)
1103	bl	schedule_tail
1104	cbz	x19, 1f				// not a kernel thread
1105	mov	x0, x20
1106	blr	x19
11071:	get_current_task tsk
1108	b	ret_to_user
1109ENDPROC(ret_from_fork)
1110NOKPROBE(ret_from_fork)
1111
1112#ifdef CONFIG_ARM_SDE_INTERFACE
1113
1114#include <asm/sdei.h>
1115#include <uapi/linux/arm_sdei.h>
1116
1117.macro sdei_handler_exit exit_mode
1118	/* On success, this call never returns... */
1119	cmp	\exit_mode, #SDEI_EXIT_SMC
1120	b.ne	99f
1121	smc	#0
1122	b	.
112399:	hvc	#0
1124	b	.
1125.endm
1126
1127#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1128/*
1129 * The regular SDEI entry point may have been unmapped along with the rest of
1130 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1131 * argument accessible.
1132 *
1133 * This clobbers x4; __sdei_handler() will restore it from firmware's
1134 * copy.
1135 */
1136.ltorg
1137.pushsection ".entry.tramp.text", "ax"
1138ENTRY(__sdei_asm_entry_trampoline)
1139	mrs	x4, ttbr1_el1
1140	tbz	x4, #USER_ASID_BIT, 1f
1141
1142	tramp_map_kernel tmp=x4
1143	isb
1144	mov	x4, xzr
1145
1146	/*
1147	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1148	 * the kernel on exit.
1149	 */
11501:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1151
1152#ifdef CONFIG_RANDOMIZE_BASE
1153	adr	x4, tramp_vectors + PAGE_SIZE
1154	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1155	ldr	x4, [x4]
1156#else
1157	ldr	x4, =__sdei_asm_handler
1158#endif
1159	br	x4
1160ENDPROC(__sdei_asm_entry_trampoline)
1161NOKPROBE(__sdei_asm_entry_trampoline)
1162
1163/*
1164 * Make the exit call and restore the original ttbr1_el1
1165 *
1166 * x0 & x1: setup for the exit API call
1167 * x2: exit_mode
1168 * x4: struct sdei_registered_event argument from registration time.
1169 */
1170ENTRY(__sdei_asm_exit_trampoline)
1171	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1172	cbnz	x4, 1f
1173
1174	tramp_unmap_kernel	tmp=x4
1175
11761:	sdei_handler_exit exit_mode=x2
1177ENDPROC(__sdei_asm_exit_trampoline)
1178NOKPROBE(__sdei_asm_exit_trampoline)
1179	.ltorg
1180.popsection		// .entry.tramp.text
1181#ifdef CONFIG_RANDOMIZE_BASE
1182.pushsection ".rodata", "a"
1183__sdei_asm_trampoline_next_handler:
1184	.quad	__sdei_asm_handler
1185.popsection		// .rodata
1186#endif /* CONFIG_RANDOMIZE_BASE */
1187#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1188
1189/*
1190 * Software Delegated Exception entry point.
1191 *
1192 * x0: Event number
1193 * x1: struct sdei_registered_event argument from registration time.
1194 * x2: interrupted PC
1195 * x3: interrupted PSTATE
1196 * x4: maybe clobbered by the trampoline
1197 *
1198 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1199 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1200 * want them.
1201 */
1202ENTRY(__sdei_asm_handler)
1203	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1204	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1205	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1206	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1207	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1208	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1209	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1210	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1211	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1212	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1213	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1214	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1215	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1216	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1217	mov	x4, sp
1218	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1219
1220	mov	x19, x1
1221
1222#ifdef CONFIG_VMAP_STACK
1223	/*
1224	 * entry.S may have been using sp as a scratch register; find whether
1225	 * this is a normal or critical event and switch to the appropriate
1226	 * stack for this CPU.
1227	 */
1228	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1229	cbnz	w4, 1f
1230	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1231	b	2f
12321:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12332:	mov	x6, #SDEI_STACK_SIZE
1234	add	x5, x5, x6
1235	mov	sp, x5
1236#endif
1237
1238	/*
1239	 * We may have interrupted userspace, or a guest, or exit-from or
1240	 * return-to either of these. We can't trust sp_el0; restore it.
1241	 */
1242	mrs	x28, sp_el0
1243	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1244	msr	sp_el0, x0
1245
1246	/* If we interrupted the kernel, point to the previous stack/frame. */
1247	and     x0, x3, #0xc
1248	mrs     x1, CurrentEL
1249	cmp     x0, x1
1250	csel	x29, x29, xzr, eq	// fp, or zero
1251	csel	x4, x2, xzr, eq		// elr, or zero
1252
1253	stp	x29, x4, [sp, #-16]!
1254	mov	x29, sp
1255
1256	add	x0, x19, #SDEI_EVENT_INTREGS
1257	mov	x1, x19
1258	bl	__sdei_handler
1259
1260	msr	sp_el0, x28
1261	/* restore regs >x17 that we clobbered */
1262	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1263	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1264	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1265	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1266	mov	sp, x1
1267
1268	mov	x1, x0			// address to complete_and_resume
1269	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1270	cmp	x0, #1
1271	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1272	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1273	csel	x0, x2, x3, ls
1274
1275	ldr_l	x2, sdei_exit_mode
1276
1277alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1278	sdei_handler_exit exit_mode=x2
1279alternative_else_nop_endif
1280
1281#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1282	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1283	br	x5
1284#endif
1285ENDPROC(__sdei_asm_handler)
1286NOKPROBE(__sdei_asm_handler)
1287#endif /* CONFIG_ARM_SDE_INTERFACE */
1288