xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 5f66f73b)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32/*
33 * Context tracking and irqflag tracing need to instrument transitions between
34 * user and kernel mode.
35 */
36	.macro user_exit_irqoff
37#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
38	bl	enter_from_user_mode
39#endif
40	.endm
41
42	.macro user_enter_irqoff
43#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
44	bl	exit_to_user_mode
45#endif
46	.endm
47
48	.macro	clear_gp_regs
49	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
50	mov	x\n, xzr
51	.endr
52	.endm
53
54/*
55 * Bad Abort numbers
56 *-----------------
57 */
58#define BAD_SYNC	0
59#define BAD_IRQ		1
60#define BAD_FIQ		2
61#define BAD_ERROR	3
62
63	.macro kernel_ventry, el, label, regsize = 64
64	.align 7
65#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
66	.if	\el == 0
67alternative_if ARM64_UNMAP_KERNEL_AT_EL0
68	.if	\regsize == 64
69	mrs	x30, tpidrro_el0
70	msr	tpidrro_el0, xzr
71	.else
72	mov	x30, xzr
73	.endif
74alternative_else_nop_endif
75	.endif
76#endif
77
78	sub	sp, sp, #PT_REGS_SIZE
79#ifdef CONFIG_VMAP_STACK
80	/*
81	 * Test whether the SP has overflowed, without corrupting a GPR.
82	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
83	 * should always be zero.
84	 */
85	add	sp, sp, x0			// sp' = sp + x0
86	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
87	tbnz	x0, #THREAD_SHIFT, 0f
88	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
89	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
90	b	el\()\el\()_\label
91
920:
93	/*
94	 * Either we've just detected an overflow, or we've taken an exception
95	 * while on the overflow stack. Either way, we won't return to
96	 * userspace, and can clobber EL0 registers to free up GPRs.
97	 */
98
99	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
100	msr	tpidr_el0, x0
101
102	/* Recover the original x0 value and stash it in tpidrro_el0 */
103	sub	x0, sp, x0
104	msr	tpidrro_el0, x0
105
106	/* Switch to the overflow stack */
107	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
108
109	/*
110	 * Check whether we were already on the overflow stack. This may happen
111	 * after panic() re-enables interrupts.
112	 */
113	mrs	x0, tpidr_el0			// sp of interrupted context
114	sub	x0, sp, x0			// delta with top of overflow stack
115	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
116	b.ne	__bad_stack			// no? -> bad stack pointer
117
118	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
119	sub	sp, sp, x0
120	mrs	x0, tpidrro_el0
121#endif
122	b	el\()\el\()_\label
123	.endm
124
125	.macro tramp_alias, dst, sym
126	mov_q	\dst, TRAMP_VALIAS
127	add	\dst, \dst, #(\sym - .entry.tramp.text)
128	.endm
129
130	/*
131	 * This macro corrupts x0-x3. It is the caller's duty  to save/restore
132	 * them if required.
133	 */
134	.macro	apply_ssbd, state, tmp1, tmp2
135alternative_cb	spectre_v4_patch_fw_mitigation_enable
136	b	.L__asm_ssbd_skip\@		// Patched to NOP
137alternative_cb_end
138	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
139	cbz	\tmp2,	.L__asm_ssbd_skip\@
140	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
141	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
142	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
143	mov	w1, #\state
144alternative_cb	spectre_v4_patch_fw_mitigation_conduit
145	nop					// Patched to SMC/HVC #0
146alternative_cb_end
147.L__asm_ssbd_skip\@:
148	.endm
149
150	/* Check for MTE asynchronous tag check faults */
151	.macro check_mte_async_tcf, tmp, ti_flags
152#ifdef CONFIG_ARM64_MTE
153	.arch_extension lse
154alternative_if_not ARM64_MTE
155	b	1f
156alternative_else_nop_endif
157	mrs_s	\tmp, SYS_TFSRE0_EL1
158	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
159	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
160	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
161	add	\ti_flags, tsk, #TSK_TI_FLAGS
162	stset	\tmp, [\ti_flags]
163	msr_s	SYS_TFSRE0_EL1, xzr
1641:
165#endif
166	.endm
167
168	/* Clear the MTE asynchronous tag check faults */
169	.macro clear_mte_async_tcf
170#ifdef CONFIG_ARM64_MTE
171alternative_if ARM64_MTE
172	dsb	ish
173	msr_s	SYS_TFSRE0_EL1, xzr
174alternative_else_nop_endif
175#endif
176	.endm
177
178	.macro mte_set_gcr, tmp, tmp2
179#ifdef CONFIG_ARM64_MTE
180	/*
181	 * Calculate and set the exclude mask preserving
182	 * the RRND (bit[16]) setting.
183	 */
184	mrs_s	\tmp2, SYS_GCR_EL1
185	bfi	\tmp2, \tmp, #0, #16
186	msr_s	SYS_GCR_EL1, \tmp2
187#endif
188	.endm
189
190	.macro mte_set_kernel_gcr, tmp, tmp2
191#ifdef CONFIG_KASAN_HW_TAGS
192alternative_if_not ARM64_MTE
193	b	1f
194alternative_else_nop_endif
195	ldr_l	\tmp, gcr_kernel_excl
196
197	mte_set_gcr \tmp, \tmp2
198	isb
1991:
200#endif
201	.endm
202
203	.macro mte_set_user_gcr, tsk, tmp, tmp2
204#ifdef CONFIG_ARM64_MTE
205alternative_if_not ARM64_MTE
206	b	1f
207alternative_else_nop_endif
208	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
209
210	mte_set_gcr \tmp, \tmp2
2111:
212#endif
213	.endm
214
215	.macro	kernel_entry, el, regsize = 64
216	.if	\regsize == 32
217	mov	w0, w0				// zero upper 32 bits of x0
218	.endif
219	stp	x0, x1, [sp, #16 * 0]
220	stp	x2, x3, [sp, #16 * 1]
221	stp	x4, x5, [sp, #16 * 2]
222	stp	x6, x7, [sp, #16 * 3]
223	stp	x8, x9, [sp, #16 * 4]
224	stp	x10, x11, [sp, #16 * 5]
225	stp	x12, x13, [sp, #16 * 6]
226	stp	x14, x15, [sp, #16 * 7]
227	stp	x16, x17, [sp, #16 * 8]
228	stp	x18, x19, [sp, #16 * 9]
229	stp	x20, x21, [sp, #16 * 10]
230	stp	x22, x23, [sp, #16 * 11]
231	stp	x24, x25, [sp, #16 * 12]
232	stp	x26, x27, [sp, #16 * 13]
233	stp	x28, x29, [sp, #16 * 14]
234
235	.if	\el == 0
236	clear_gp_regs
237	mrs	x21, sp_el0
238	ldr_this_cpu	tsk, __entry_task, x20
239	msr	sp_el0, tsk
240
241	/*
242	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
243	 * when scheduling.
244	 */
245	ldr	x19, [tsk, #TSK_TI_FLAGS]
246	disable_step_tsk x19, x20
247
248	/* Check for asynchronous tag check faults in user space */
249	check_mte_async_tcf x22, x23
250	apply_ssbd 1, x22, x23
251
252#ifdef CONFIG_ARM64_PTR_AUTH
253alternative_if ARM64_HAS_ADDRESS_AUTH
254	/*
255	 * Enable IA for in-kernel PAC if the task had it disabled. Although
256	 * this could be implemented with an unconditional MRS which would avoid
257	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
258	 *
259	 * Install the kernel IA key only if IA was enabled in the task. If IA
260	 * was disabled on kernel exit then we would have left the kernel IA
261	 * installed so there is no need to install it again.
262	 */
263	ldr	x0, [tsk, THREAD_SCTLR_USER]
264	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
265	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
266	b	2f
2671:
268	mrs	x0, sctlr_el1
269	orr	x0, x0, SCTLR_ELx_ENIA
270	msr	sctlr_el1, x0
2712:
272	isb
273alternative_else_nop_endif
274#endif
275
276	mte_set_kernel_gcr x22, x23
277
278	scs_load tsk, x20
279	.else
280	add	x21, sp, #PT_REGS_SIZE
281	get_current_task tsk
282	.endif /* \el == 0 */
283	mrs	x22, elr_el1
284	mrs	x23, spsr_el1
285	stp	lr, x21, [sp, #S_LR]
286
287	/*
288	 * For exceptions from EL0, terminate the callchain here.
289	 * For exceptions from EL1, create a synthetic frame record so the
290	 * interrupted code shows up in the backtrace.
291	 */
292	.if \el == 0
293	mov	x29, xzr
294	.else
295	stp	x29, x22, [sp, #S_STACKFRAME]
296	add	x29, sp, #S_STACKFRAME
297	.endif
298
299#ifdef CONFIG_ARM64_SW_TTBR0_PAN
300alternative_if_not ARM64_HAS_PAN
301	bl	__swpan_entry_el\el
302alternative_else_nop_endif
303#endif
304
305	stp	x22, x23, [sp, #S_PC]
306
307	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
308	.if	\el == 0
309	mov	w21, #NO_SYSCALL
310	str	w21, [sp, #S_SYSCALLNO]
311	.endif
312
313	/* Save pmr */
314alternative_if ARM64_HAS_IRQ_PRIO_MASKING
315	mrs_s	x20, SYS_ICC_PMR_EL1
316	str	x20, [sp, #S_PMR_SAVE]
317alternative_else_nop_endif
318
319	/* Re-enable tag checking (TCO set on exception entry) */
320#ifdef CONFIG_ARM64_MTE
321alternative_if ARM64_MTE
322	SET_PSTATE_TCO(0)
323alternative_else_nop_endif
324#endif
325
326	/*
327	 * Registers that may be useful after this macro is invoked:
328	 *
329	 * x20 - ICC_PMR_EL1
330	 * x21 - aborted SP
331	 * x22 - aborted PC
332	 * x23 - aborted PSTATE
333	*/
334	.endm
335
336	.macro	kernel_exit, el
337	.if	\el != 0
338	disable_daif
339	.endif
340
341	/* Restore pmr */
342alternative_if ARM64_HAS_IRQ_PRIO_MASKING
343	ldr	x20, [sp, #S_PMR_SAVE]
344	msr_s	SYS_ICC_PMR_EL1, x20
345	mrs_s	x21, SYS_ICC_CTLR_EL1
346	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
347	dsb	sy				// Ensure priority change is seen by redistributor
348.L__skip_pmr_sync\@:
349alternative_else_nop_endif
350
351	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
352
353#ifdef CONFIG_ARM64_SW_TTBR0_PAN
354alternative_if_not ARM64_HAS_PAN
355	bl	__swpan_exit_el\el
356alternative_else_nop_endif
357#endif
358
359	.if	\el == 0
360	ldr	x23, [sp, #S_SP]		// load return stack pointer
361	msr	sp_el0, x23
362	tst	x22, #PSR_MODE32_BIT		// native task?
363	b.eq	3f
364
365#ifdef CONFIG_ARM64_ERRATUM_845719
366alternative_if ARM64_WORKAROUND_845719
367#ifdef CONFIG_PID_IN_CONTEXTIDR
368	mrs	x29, contextidr_el1
369	msr	contextidr_el1, x29
370#else
371	msr contextidr_el1, xzr
372#endif
373alternative_else_nop_endif
374#endif
3753:
376	scs_save tsk, x0
377
378#ifdef CONFIG_ARM64_PTR_AUTH
379alternative_if ARM64_HAS_ADDRESS_AUTH
380	/*
381	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
382	 * alternatively install the user's IA. All other per-task keys and
383	 * SCTLR bits were updated on task switch.
384	 *
385	 * No kernel C function calls after this.
386	 */
387	ldr	x0, [tsk, THREAD_SCTLR_USER]
388	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
389	__ptrauth_keys_install_user tsk, x0, x1, x2
390	b	2f
3911:
392	mrs	x0, sctlr_el1
393	bic	x0, x0, SCTLR_ELx_ENIA
394	msr	sctlr_el1, x0
3952:
396alternative_else_nop_endif
397#endif
398
399	mte_set_user_gcr tsk, x0, x1
400
401	apply_ssbd 0, x0, x1
402	.endif
403
404	msr	elr_el1, x21			// set up the return data
405	msr	spsr_el1, x22
406	ldp	x0, x1, [sp, #16 * 0]
407	ldp	x2, x3, [sp, #16 * 1]
408	ldp	x4, x5, [sp, #16 * 2]
409	ldp	x6, x7, [sp, #16 * 3]
410	ldp	x8, x9, [sp, #16 * 4]
411	ldp	x10, x11, [sp, #16 * 5]
412	ldp	x12, x13, [sp, #16 * 6]
413	ldp	x14, x15, [sp, #16 * 7]
414	ldp	x16, x17, [sp, #16 * 8]
415	ldp	x18, x19, [sp, #16 * 9]
416	ldp	x20, x21, [sp, #16 * 10]
417	ldp	x22, x23, [sp, #16 * 11]
418	ldp	x24, x25, [sp, #16 * 12]
419	ldp	x26, x27, [sp, #16 * 13]
420	ldp	x28, x29, [sp, #16 * 14]
421	ldr	lr, [sp, #S_LR]
422	add	sp, sp, #PT_REGS_SIZE		// restore sp
423
424	.if	\el == 0
425alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
426#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
427	bne	4f
428	msr	far_el1, x30
429	tramp_alias	x30, tramp_exit_native
430	br	x30
4314:
432	tramp_alias	x30, tramp_exit_compat
433	br	x30
434#endif
435	.else
436	/* Ensure any device/NC reads complete */
437	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
438
439	eret
440	.endif
441	sb
442	.endm
443
444#ifdef CONFIG_ARM64_SW_TTBR0_PAN
445	/*
446	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
447	 * EL0, there is no need to check the state of TTBR0_EL1 since
448	 * accesses are always enabled.
449	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
450	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
451	 * user mappings.
452	 */
453SYM_CODE_START_LOCAL(__swpan_entry_el1)
454	mrs	x21, ttbr0_el1
455	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
456	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
457	b.eq	1f				// TTBR0 access already disabled
458	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
459SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
460	__uaccess_ttbr0_disable x21
4611:	ret
462SYM_CODE_END(__swpan_entry_el1)
463
464	/*
465	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
466	 * PAN bit checking.
467	 */
468SYM_CODE_START_LOCAL(__swpan_exit_el1)
469	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
470	__uaccess_ttbr0_enable x0, x1
4711:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
472	ret
473SYM_CODE_END(__swpan_exit_el1)
474
475SYM_CODE_START_LOCAL(__swpan_exit_el0)
476	__uaccess_ttbr0_enable x0, x1
477	/*
478	 * Enable errata workarounds only if returning to user. The only
479	 * workaround currently required for TTBR0_EL1 changes are for the
480	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
481	 * corruption).
482	 */
483	b	post_ttbr_update_workaround
484SYM_CODE_END(__swpan_exit_el0)
485#endif
486
487	.macro	irq_stack_entry
488	mov	x19, sp			// preserve the original sp
489#ifdef CONFIG_SHADOW_CALL_STACK
490	mov	x24, scs_sp		// preserve the original shadow stack
491#endif
492
493	/*
494	 * Compare sp with the base of the task stack.
495	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
496	 * and should switch to the irq stack.
497	 */
498	ldr	x25, [tsk, TSK_STACK]
499	eor	x25, x25, x19
500	and	x25, x25, #~(THREAD_SIZE - 1)
501	cbnz	x25, 9998f
502
503	ldr_this_cpu x25, irq_stack_ptr, x26
504	mov	x26, #IRQ_STACK_SIZE
505	add	x26, x25, x26
506
507	/* switch to the irq stack */
508	mov	sp, x26
509
510#ifdef CONFIG_SHADOW_CALL_STACK
511	/* also switch to the irq shadow stack */
512	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
513#endif
514
5159998:
516	.endm
517
518	/*
519	 * The callee-saved regs (x19-x29) should be preserved between
520	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
521	 * uses x20-x23 to store data for later use.
522	 */
523	.macro	irq_stack_exit
524	mov	sp, x19
525#ifdef CONFIG_SHADOW_CALL_STACK
526	mov	scs_sp, x24
527#endif
528	.endm
529
530/* GPRs used by entry code */
531tsk	.req	x28		// current thread_info
532
533/*
534 * Interrupt handling.
535 */
536	.macro	irq_handler, handler:req
537	ldr_l	x1, \handler
538	mov	x0, sp
539	irq_stack_entry
540	blr	x1
541	irq_stack_exit
542	.endm
543
544	.macro	gic_prio_kentry_setup, tmp:req
545#ifdef CONFIG_ARM64_PSEUDO_NMI
546	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
547	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
548	msr_s	SYS_ICC_PMR_EL1, \tmp
549	alternative_else_nop_endif
550#endif
551	.endm
552
553	.macro	gic_prio_irq_setup, pmr:req, tmp:req
554#ifdef CONFIG_ARM64_PSEUDO_NMI
555	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
556	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
557	msr_s	SYS_ICC_PMR_EL1, \tmp
558	alternative_else_nop_endif
559#endif
560	.endm
561
562	.macro el1_interrupt_handler, handler:req
563	gic_prio_irq_setup pmr=x20, tmp=x1
564	enable_da
565
566	mov	x0, sp
567	bl	enter_el1_irq_or_nmi
568
569	irq_handler	\handler
570
571#ifdef CONFIG_PREEMPTION
572	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
573alternative_if ARM64_HAS_IRQ_PRIO_MASKING
574	/*
575	 * DA were cleared at start of handling, and IF are cleared by
576	 * the GIC irqchip driver using gic_arch_enable_irqs() for
577	 * normal IRQs. If anything is set, it means we come back from
578	 * an NMI instead of a normal IRQ, so skip preemption
579	 */
580	mrs	x0, daif
581	orr	x24, x24, x0
582alternative_else_nop_endif
583	cbnz	x24, 1f				// preempt count != 0 || NMI return path
584	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
5851:
586#endif
587
588	mov	x0, sp
589	bl	exit_el1_irq_or_nmi
590	.endm
591
592	.macro el0_interrupt_handler, handler:req
593	gic_prio_irq_setup pmr=x20, tmp=x0
594	user_exit_irqoff
595	enable_da
596
597	tbz	x22, #55, 1f
598	bl	do_el0_irq_bp_hardening
5991:
600	irq_handler	\handler
601	.endm
602
603	.text
604
605/*
606 * Exception vectors.
607 */
608	.pushsection ".entry.text", "ax"
609
610	.align	11
611SYM_CODE_START(vectors)
612	kernel_ventry	1, sync_invalid			// Synchronous EL1t
613	kernel_ventry	1, irq_invalid			// IRQ EL1t
614	kernel_ventry	1, fiq_invalid			// FIQ EL1t
615	kernel_ventry	1, error_invalid		// Error EL1t
616
617	kernel_ventry	1, sync				// Synchronous EL1h
618	kernel_ventry	1, irq				// IRQ EL1h
619	kernel_ventry	1, fiq				// FIQ EL1h
620	kernel_ventry	1, error			// Error EL1h
621
622	kernel_ventry	0, sync				// Synchronous 64-bit EL0
623	kernel_ventry	0, irq				// IRQ 64-bit EL0
624	kernel_ventry	0, fiq				// FIQ 64-bit EL0
625	kernel_ventry	0, error			// Error 64-bit EL0
626
627#ifdef CONFIG_COMPAT
628	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
629	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
630	kernel_ventry	0, fiq_compat, 32		// FIQ 32-bit EL0
631	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
632#else
633	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
634	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
635	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
636	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
637#endif
638SYM_CODE_END(vectors)
639
640#ifdef CONFIG_VMAP_STACK
641	/*
642	 * We detected an overflow in kernel_ventry, which switched to the
643	 * overflow stack. Stash the exception regs, and head to our overflow
644	 * handler.
645	 */
646__bad_stack:
647	/* Restore the original x0 value */
648	mrs	x0, tpidrro_el0
649
650	/*
651	 * Store the original GPRs to the new stack. The orginal SP (minus
652	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
653	 */
654	sub	sp, sp, #PT_REGS_SIZE
655	kernel_entry 1
656	mrs	x0, tpidr_el0
657	add	x0, x0, #PT_REGS_SIZE
658	str	x0, [sp, #S_SP]
659
660	/* Stash the regs for handle_bad_stack */
661	mov	x0, sp
662
663	/* Time to die */
664	bl	handle_bad_stack
665	ASM_BUG()
666#endif /* CONFIG_VMAP_STACK */
667
668/*
669 * Invalid mode handlers
670 */
671	.macro	inv_entry, el, reason, regsize = 64
672	kernel_entry \el, \regsize
673	mov	x0, sp
674	mov	x1, #\reason
675	mrs	x2, esr_el1
676	bl	bad_mode
677	ASM_BUG()
678	.endm
679
680SYM_CODE_START_LOCAL(el0_sync_invalid)
681	inv_entry 0, BAD_SYNC
682SYM_CODE_END(el0_sync_invalid)
683
684SYM_CODE_START_LOCAL(el0_irq_invalid)
685	inv_entry 0, BAD_IRQ
686SYM_CODE_END(el0_irq_invalid)
687
688SYM_CODE_START_LOCAL(el0_fiq_invalid)
689	inv_entry 0, BAD_FIQ
690SYM_CODE_END(el0_fiq_invalid)
691
692SYM_CODE_START_LOCAL(el0_error_invalid)
693	inv_entry 0, BAD_ERROR
694SYM_CODE_END(el0_error_invalid)
695
696SYM_CODE_START_LOCAL(el1_sync_invalid)
697	inv_entry 1, BAD_SYNC
698SYM_CODE_END(el1_sync_invalid)
699
700SYM_CODE_START_LOCAL(el1_irq_invalid)
701	inv_entry 1, BAD_IRQ
702SYM_CODE_END(el1_irq_invalid)
703
704SYM_CODE_START_LOCAL(el1_fiq_invalid)
705	inv_entry 1, BAD_FIQ
706SYM_CODE_END(el1_fiq_invalid)
707
708SYM_CODE_START_LOCAL(el1_error_invalid)
709	inv_entry 1, BAD_ERROR
710SYM_CODE_END(el1_error_invalid)
711
712/*
713 * EL1 mode handlers.
714 */
715	.align	6
716SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
717	kernel_entry 1
718	mov	x0, sp
719	bl	el1_sync_handler
720	kernel_exit 1
721SYM_CODE_END(el1_sync)
722
723	.align	6
724SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
725	kernel_entry 1
726	el1_interrupt_handler handle_arch_irq
727	kernel_exit 1
728SYM_CODE_END(el1_irq)
729
730SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
731	kernel_entry 1
732	el1_interrupt_handler handle_arch_fiq
733	kernel_exit 1
734SYM_CODE_END(el1_fiq)
735
736/*
737 * EL0 mode handlers.
738 */
739	.align	6
740SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
741	kernel_entry 0
742	mov	x0, sp
743	bl	el0_sync_handler
744	b	ret_to_user
745SYM_CODE_END(el0_sync)
746
747#ifdef CONFIG_COMPAT
748	.align	6
749SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
750	kernel_entry 0, 32
751	mov	x0, sp
752	bl	el0_sync_compat_handler
753	b	ret_to_user
754SYM_CODE_END(el0_sync_compat)
755
756	.align	6
757SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
758	kernel_entry 0, 32
759	b	el0_irq_naked
760SYM_CODE_END(el0_irq_compat)
761
762SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
763	kernel_entry 0, 32
764	b	el0_fiq_naked
765SYM_CODE_END(el0_fiq_compat)
766
767SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
768	kernel_entry 0, 32
769	b	el0_error_naked
770SYM_CODE_END(el0_error_compat)
771#endif
772
773	.align	6
774SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
775	kernel_entry 0
776el0_irq_naked:
777	el0_interrupt_handler handle_arch_irq
778	b	ret_to_user
779SYM_CODE_END(el0_irq)
780
781SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
782	kernel_entry 0
783el0_fiq_naked:
784	el0_interrupt_handler handle_arch_fiq
785	b	ret_to_user
786SYM_CODE_END(el0_fiq)
787
788SYM_CODE_START_LOCAL(el1_error)
789	kernel_entry 1
790	mrs	x1, esr_el1
791	gic_prio_kentry_setup tmp=x2
792	enable_dbg
793	mov	x0, sp
794	bl	do_serror
795	kernel_exit 1
796SYM_CODE_END(el1_error)
797
798SYM_CODE_START_LOCAL(el0_error)
799	kernel_entry 0
800el0_error_naked:
801	mrs	x25, esr_el1
802	gic_prio_kentry_setup tmp=x2
803	user_exit_irqoff
804	enable_dbg
805	mov	x0, sp
806	mov	x1, x25
807	bl	do_serror
808	enable_da
809	b	ret_to_user
810SYM_CODE_END(el0_error)
811
812/*
813 * "slow" syscall return path.
814 */
815SYM_CODE_START_LOCAL(ret_to_user)
816	disable_daif
817	gic_prio_kentry_setup tmp=x3
818#ifdef CONFIG_TRACE_IRQFLAGS
819	bl	trace_hardirqs_off
820#endif
821	ldr	x19, [tsk, #TSK_TI_FLAGS]
822	and	x2, x19, #_TIF_WORK_MASK
823	cbnz	x2, work_pending
824finish_ret_to_user:
825	user_enter_irqoff
826	/* Ignore asynchronous tag check faults in the uaccess routines */
827	clear_mte_async_tcf
828	enable_step_tsk x19, x2
829#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
830	bl	stackleak_erase
831#endif
832	kernel_exit 0
833
834/*
835 * Ok, we need to do extra processing, enter the slow path.
836 */
837work_pending:
838	mov	x0, sp				// 'regs'
839	mov	x1, x19
840	bl	do_notify_resume
841	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
842	b	finish_ret_to_user
843SYM_CODE_END(ret_to_user)
844
845	.popsection				// .entry.text
846
847#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
848/*
849 * Exception vectors trampoline.
850 */
851	.pushsection ".entry.tramp.text", "ax"
852
853	// Move from tramp_pg_dir to swapper_pg_dir
854	.macro tramp_map_kernel, tmp
855	mrs	\tmp, ttbr1_el1
856	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
857	bic	\tmp, \tmp, #USER_ASID_FLAG
858	msr	ttbr1_el1, \tmp
859#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
860alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
861	/* ASID already in \tmp[63:48] */
862	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
863	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
864	/* 2MB boundary containing the vectors, so we nobble the walk cache */
865	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
866	isb
867	tlbi	vae1, \tmp
868	dsb	nsh
869alternative_else_nop_endif
870#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
871	.endm
872
873	// Move from swapper_pg_dir to tramp_pg_dir
874	.macro tramp_unmap_kernel, tmp
875	mrs	\tmp, ttbr1_el1
876	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
877	orr	\tmp, \tmp, #USER_ASID_FLAG
878	msr	ttbr1_el1, \tmp
879	/*
880	 * We avoid running the post_ttbr_update_workaround here because
881	 * it's only needed by Cavium ThunderX, which requires KPTI to be
882	 * disabled.
883	 */
884	.endm
885
886	.macro tramp_ventry, regsize = 64
887	.align	7
8881:
889	.if	\regsize == 64
890	msr	tpidrro_el0, x30	// Restored in kernel_ventry
891	.endif
892	/*
893	 * Defend against branch aliasing attacks by pushing a dummy
894	 * entry onto the return stack and using a RET instruction to
895	 * enter the full-fat kernel vectors.
896	 */
897	bl	2f
898	b	.
8992:
900	tramp_map_kernel	x30
901#ifdef CONFIG_RANDOMIZE_BASE
902	adr	x30, tramp_vectors + PAGE_SIZE
903alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
904	ldr	x30, [x30]
905#else
906	ldr	x30, =vectors
907#endif
908alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
909	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
910alternative_else_nop_endif
911	msr	vbar_el1, x30
912	add	x30, x30, #(1b - tramp_vectors)
913	isb
914	ret
915	.endm
916
917	.macro tramp_exit, regsize = 64
918	adr	x30, tramp_vectors
919	msr	vbar_el1, x30
920	tramp_unmap_kernel	x30
921	.if	\regsize == 64
922	mrs	x30, far_el1
923	.endif
924	eret
925	sb
926	.endm
927
928	.align	11
929SYM_CODE_START_NOALIGN(tramp_vectors)
930	.space	0x400
931
932	tramp_ventry
933	tramp_ventry
934	tramp_ventry
935	tramp_ventry
936
937	tramp_ventry	32
938	tramp_ventry	32
939	tramp_ventry	32
940	tramp_ventry	32
941SYM_CODE_END(tramp_vectors)
942
943SYM_CODE_START(tramp_exit_native)
944	tramp_exit
945SYM_CODE_END(tramp_exit_native)
946
947SYM_CODE_START(tramp_exit_compat)
948	tramp_exit	32
949SYM_CODE_END(tramp_exit_compat)
950
951	.ltorg
952	.popsection				// .entry.tramp.text
953#ifdef CONFIG_RANDOMIZE_BASE
954	.pushsection ".rodata", "a"
955	.align PAGE_SHIFT
956SYM_DATA_START(__entry_tramp_data_start)
957	.quad	vectors
958SYM_DATA_END(__entry_tramp_data_start)
959	.popsection				// .rodata
960#endif /* CONFIG_RANDOMIZE_BASE */
961#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
962
963/*
964 * Register switch for AArch64. The callee-saved registers need to be saved
965 * and restored. On entry:
966 *   x0 = previous task_struct (must be preserved across the switch)
967 *   x1 = next task_struct
968 * Previous and next are guaranteed not to be the same.
969 *
970 */
971SYM_FUNC_START(cpu_switch_to)
972	mov	x10, #THREAD_CPU_CONTEXT
973	add	x8, x0, x10
974	mov	x9, sp
975	stp	x19, x20, [x8], #16		// store callee-saved registers
976	stp	x21, x22, [x8], #16
977	stp	x23, x24, [x8], #16
978	stp	x25, x26, [x8], #16
979	stp	x27, x28, [x8], #16
980	stp	x29, x9, [x8], #16
981	str	lr, [x8]
982	add	x8, x1, x10
983	ldp	x19, x20, [x8], #16		// restore callee-saved registers
984	ldp	x21, x22, [x8], #16
985	ldp	x23, x24, [x8], #16
986	ldp	x25, x26, [x8], #16
987	ldp	x27, x28, [x8], #16
988	ldp	x29, x9, [x8], #16
989	ldr	lr, [x8]
990	mov	sp, x9
991	msr	sp_el0, x1
992	ptrauth_keys_install_kernel x1, x8, x9, x10
993	scs_save x0, x8
994	scs_load x1, x8
995	ret
996SYM_FUNC_END(cpu_switch_to)
997NOKPROBE(cpu_switch_to)
998
999/*
1000 * This is how we return from a fork.
1001 */
1002SYM_CODE_START(ret_from_fork)
1003	bl	schedule_tail
1004	cbz	x19, 1f				// not a kernel thread
1005	mov	x0, x20
1006	blr	x19
10071:	get_current_task tsk
1008	b	ret_to_user
1009SYM_CODE_END(ret_from_fork)
1010NOKPROBE(ret_from_fork)
1011
1012#ifdef CONFIG_ARM_SDE_INTERFACE
1013
1014#include <asm/sdei.h>
1015#include <uapi/linux/arm_sdei.h>
1016
1017.macro sdei_handler_exit exit_mode
1018	/* On success, this call never returns... */
1019	cmp	\exit_mode, #SDEI_EXIT_SMC
1020	b.ne	99f
1021	smc	#0
1022	b	.
102399:	hvc	#0
1024	b	.
1025.endm
1026
1027#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1028/*
1029 * The regular SDEI entry point may have been unmapped along with the rest of
1030 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1031 * argument accessible.
1032 *
1033 * This clobbers x4, __sdei_handler() will restore this from firmware's
1034 * copy.
1035 */
1036.ltorg
1037.pushsection ".entry.tramp.text", "ax"
1038SYM_CODE_START(__sdei_asm_entry_trampoline)
1039	mrs	x4, ttbr1_el1
1040	tbz	x4, #USER_ASID_BIT, 1f
1041
1042	tramp_map_kernel tmp=x4
1043	isb
1044	mov	x4, xzr
1045
1046	/*
1047	 * Remember whether to unmap the kernel on exit.
1048	 */
10491:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
1050
1051#ifdef CONFIG_RANDOMIZE_BASE
1052	adr	x4, tramp_vectors + PAGE_SIZE
1053	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1054	ldr	x4, [x4]
1055#else
1056	ldr	x4, =__sdei_asm_handler
1057#endif
1058	br	x4
1059SYM_CODE_END(__sdei_asm_entry_trampoline)
1060NOKPROBE(__sdei_asm_entry_trampoline)
1061
1062/*
1063 * Make the exit call and restore the original ttbr1_el1
1064 *
1065 * x0 & x1: setup for the exit API call
1066 * x2: exit_mode
1067 * x4: struct sdei_registered_event argument from registration time.
1068 */
1069SYM_CODE_START(__sdei_asm_exit_trampoline)
1070	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
1071	cbnz	x4, 1f
1072
1073	tramp_unmap_kernel	tmp=x4
1074
10751:	sdei_handler_exit exit_mode=x2
1076SYM_CODE_END(__sdei_asm_exit_trampoline)
1077NOKPROBE(__sdei_asm_exit_trampoline)
1078	.ltorg
1079.popsection		// .entry.tramp.text
1080#ifdef CONFIG_RANDOMIZE_BASE
1081.pushsection ".rodata", "a"
1082SYM_DATA_START(__sdei_asm_trampoline_next_handler)
1083	.quad	__sdei_asm_handler
1084SYM_DATA_END(__sdei_asm_trampoline_next_handler)
1085.popsection		// .rodata
1086#endif /* CONFIG_RANDOMIZE_BASE */
1087#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1088
1089/*
1090 * Software Delegated Exception entry point.
1091 *
1092 * x0: Event number
1093 * x1: struct sdei_registered_event argument from registration time.
1094 * x2: interrupted PC
1095 * x3: interrupted PSTATE
1096 * x4: maybe clobbered by the trampoline
1097 *
1098 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1099 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1100 * want them.
1101 */
1102SYM_CODE_START(__sdei_asm_handler)
1103	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1104	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1105	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1106	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1107	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1108	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1109	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1110	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1111	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1112	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1113	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1114	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1115	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1116	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1117	mov	x4, sp
1118	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1119
1120	mov	x19, x1
1121
1122#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1123	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1124#endif
1125
1126#ifdef CONFIG_VMAP_STACK
1127	/*
1128	 * entry.S may have been using sp as a scratch register, find whether
1129	 * this is a normal or critical event and switch to the appropriate
1130	 * stack for this CPU.
1131	 */
1132	cbnz	w4, 1f
1133	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1134	b	2f
11351:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
11362:	mov	x6, #SDEI_STACK_SIZE
1137	add	x5, x5, x6
1138	mov	sp, x5
1139#endif
1140
1141#ifdef CONFIG_SHADOW_CALL_STACK
1142	/* Use a separate shadow call stack for normal and critical events */
1143	cbnz	w4, 3f
1144	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1145	b	4f
11463:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
11474:
1148#endif
1149
1150	/*
1151	 * We may have interrupted userspace, or a guest, or exit-from or
1152	 * return-to either of these. We can't trust sp_el0, restore it.
1153	 */
1154	mrs	x28, sp_el0
1155	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1156	msr	sp_el0, x0
1157
1158	/* If we interrupted the kernel point to the previous stack/frame. */
1159	and     x0, x3, #0xc
1160	mrs     x1, CurrentEL
1161	cmp     x0, x1
1162	csel	x29, x29, xzr, eq	// fp, or zero
1163	csel	x4, x2, xzr, eq		// elr, or zero
1164
1165	stp	x29, x4, [sp, #-16]!
1166	mov	x29, sp
1167
1168	add	x0, x19, #SDEI_EVENT_INTREGS
1169	mov	x1, x19
1170	bl	__sdei_handler
1171
1172	msr	sp_el0, x28
1173	/* restore regs >x17 that we clobbered */
1174	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1175	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1176	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1177	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1178	mov	sp, x1
1179
1180	mov	x1, x0			// address to complete_and_resume
1181	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1182	cmp	x0, #1
1183	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1184	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1185	csel	x0, x2, x3, ls
1186
1187	ldr_l	x2, sdei_exit_mode
1188
1189alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1190	sdei_handler_exit exit_mode=x2
1191alternative_else_nop_endif
1192
1193#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1194	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1195	br	x5
1196#endif
1197SYM_CODE_END(__sdei_asm_handler)
1198NOKPROBE(__sdei_asm_handler)
1199#endif /* CONFIG_ARM_SDE_INTERFACE */
1200