xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 1d7a0395)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32/*
33 * Context tracking and irqflag tracing need to instrument transitions between
34 * user and kernel mode.
35 */
36	.macro user_enter_irqoff
37#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
38	bl	exit_to_user_mode
39#endif
40	.endm
41
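	/*
	 * Zero x0-x29. Used on entry from EL0 so that stale, user-controlled
	 * register values are not left live while running kernel code.
	 */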
42	.macro	clear_gp_regs
43	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
44	mov	x\n, xzr
45	.endr
46	.endm
47
48	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
49	.align 7
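	/*
	 * With KPTI, exceptions from 64-bit EL0 arrive here via tramp_ventry,
	 * which stashed the original x30 in tpidrro_el0; recover it and scrub
	 * the sysreg. For 32-bit EL0 there is no AArch64 x30 state to
	 * preserve, so it is simply zeroed.
	 */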
50#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
51	.if	\el == 0
52alternative_if ARM64_UNMAP_KERNEL_AT_EL0
53	.if	\regsize == 64
54	mrs	x30, tpidrro_el0
55	msr	tpidrro_el0, xzr
56	.else
57	mov	x30, xzr
58	.endif
59alternative_else_nop_endif
60	.endif
61#endif
62
63	sub	sp, sp, #PT_REGS_SIZE
64#ifdef CONFIG_VMAP_STACK
65	/*
66	 * Test whether the SP has overflowed, without corrupting a GPR.
67	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
68	 * should always be zero.
69	 */
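	/*
	 * For example, with 16 KiB stacks (THREAD_SHIFT == 14) aligned to
	 * 32 KiB, an in-range SP has bit 14 clear while an SP that has run
	 * off the bottom of its stack has bit 14 set, so a single TBNZ can
	 * detect the overflow.
	 */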
70	add	sp, sp, x0			// sp' = sp + x0
71	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
72	tbnz	x0, #THREAD_SHIFT, 0f
73	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
74	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
75	b	el\el\ht\()_\regsize\()_\label
76
770:
78	/*
79	 * Either we've just detected an overflow, or we've taken an exception
80	 * while on the overflow stack. Either way, we won't return to
81	 * userspace, and can clobber EL0 registers to free up GPRs.
82	 */
83
84	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
85	msr	tpidr_el0, x0
86
87	/* Recover the original x0 value and stash it in tpidrro_el0 */
88	sub	x0, sp, x0
89	msr	tpidrro_el0, x0
90
91	/* Switch to the overflow stack */
92	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
93
94	/*
95	 * Check whether we were already on the overflow stack. This may happen
96	 * after panic() re-enables interrupts.
97	 */
98	mrs	x0, tpidr_el0			// sp of interrupted context
99	sub	x0, sp, x0			// delta with top of overflow stack
100	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
101	b.ne	__bad_stack			// no? -> bad stack pointer
102
103	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
104	sub	sp, sp, x0
105	mrs	x0, tpidrro_el0
106#endif
107	b	el\el\ht\()_\regsize\()_\label
108	.endm
109
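	/*
	 * Compute the address of \sym as seen through TRAMP_VALIAS, the fixmap
	 * alias of .entry.tramp.text that remains mapped for userspace when
	 * the rest of the kernel is unmapped (KPTI).
	 */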
110	.macro tramp_alias, dst, sym
111	mov_q	\dst, TRAMP_VALIAS
112	add	\dst, \dst, #(\sym - .entry.tramp.text)
113	.endm
114
115	/*
116	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
117	 * them if required.
118	 */
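	/*
	 * apply_ssbd: toggle the firmware Spectre-v4 (SSBD) mitigation via
	 * ARM_SMCCC_ARCH_WORKAROUND_2: \state == 1 enables it on kernel entry,
	 * \state == 0 relaxes it again on return to EL0. The leading branch
	 * skips the whole sequence and is only patched to a NOP when a dynamic
	 * firmware mitigation is actually in use.
	 */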
119	.macro	apply_ssbd, state, tmp1, tmp2
120alternative_cb	spectre_v4_patch_fw_mitigation_enable
121	b	.L__asm_ssbd_skip\@		// Patched to NOP
122alternative_cb_end
123	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
124	cbz	\tmp2,	.L__asm_ssbd_skip\@
125	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
126	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
127	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
128	mov	w1, #\state
129alternative_cb	spectre_v4_patch_fw_mitigation_conduit
130	nop					// Patched to SMC/HVC #0
131alternative_cb_end
132.L__asm_ssbd_skip\@:
133	.endm
134
135	/* Check for MTE asynchronous tag check faults */
136	.macro check_mte_async_tcf, tmp, ti_flags
137#ifdef CONFIG_ARM64_MTE
138	.arch_extension lse
139alternative_if_not ARM64_MTE
140	b	1f
141alternative_else_nop_endif
142	mrs_s	\tmp, SYS_TFSRE0_EL1
143	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
144	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
145	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
146	add	\ti_flags, tsk, #TSK_TI_FLAGS
147	stset	\tmp, [\ti_flags]
148	msr_s	SYS_TFSRE0_EL1, xzr
1491:
150#endif
151	.endm
152
153	/* Clear the MTE asynchronous tag check faults */
154	.macro clear_mte_async_tcf
155#ifdef CONFIG_ARM64_MTE
156alternative_if ARM64_MTE
157	dsb	ish
158	msr_s	SYS_TFSRE0_EL1, xzr
159alternative_else_nop_endif
160#endif
161	.endm
162
163	.macro mte_set_gcr, tmp, tmp2
164#ifdef CONFIG_ARM64_MTE
165	/*
166	 * Calculate and set the exclude mask preserving
167	 * the RRND (bit[16]) setting.
168	 */
169	mrs_s	\tmp2, SYS_GCR_EL1
170	bfi	\tmp2, \tmp, #0, #16
171	msr_s	SYS_GCR_EL1, \tmp2
172#endif
173	.endm
174
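	/*
	 * With KASAN_HW_TAGS, install the kernel's tag exclusion mask
	 * (gcr_kernel_excl) in GCR_EL1 on kernel entry; the task's own mask is
	 * restored by mte_set_user_gcr below on return to userspace.
	 */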
175	.macro mte_set_kernel_gcr, tmp, tmp2
176#ifdef CONFIG_KASAN_HW_TAGS
177alternative_if_not ARM64_MTE
178	b	1f
179alternative_else_nop_endif
180	ldr_l	\tmp, gcr_kernel_excl
181
182	mte_set_gcr \tmp, \tmp2
183	isb
1841:
185#endif
186	.endm
187
188	.macro mte_set_user_gcr, tsk, tmp, tmp2
189#ifdef CONFIG_ARM64_MTE
190alternative_if_not ARM64_MTE
191	b	1f
192alternative_else_nop_endif
193	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
194
195	mte_set_gcr \tmp, \tmp2
1961:
197#endif
198	.endm
199
200	.macro	kernel_entry, el, regsize = 64
201	.if	\regsize == 32
202	mov	w0, w0				// zero upper 32 bits of x0
203	.endif
204	stp	x0, x1, [sp, #16 * 0]
205	stp	x2, x3, [sp, #16 * 1]
206	stp	x4, x5, [sp, #16 * 2]
207	stp	x6, x7, [sp, #16 * 3]
208	stp	x8, x9, [sp, #16 * 4]
209	stp	x10, x11, [sp, #16 * 5]
210	stp	x12, x13, [sp, #16 * 6]
211	stp	x14, x15, [sp, #16 * 7]
212	stp	x16, x17, [sp, #16 * 8]
213	stp	x18, x19, [sp, #16 * 9]
214	stp	x20, x21, [sp, #16 * 10]
215	stp	x22, x23, [sp, #16 * 11]
216	stp	x24, x25, [sp, #16 * 12]
217	stp	x26, x27, [sp, #16 * 13]
218	stp	x28, x29, [sp, #16 * 14]
219
220	.if	\el == 0
221	clear_gp_regs
222	mrs	x21, sp_el0
223	ldr_this_cpu	tsk, __entry_task, x20
224	msr	sp_el0, tsk
225
226	/*
227	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
228	 * when scheduling.
229	 */
230	ldr	x19, [tsk, #TSK_TI_FLAGS]
231	disable_step_tsk x19, x20
232
233	/* Check for asynchronous tag check faults in user space */
234	check_mte_async_tcf x22, x23
235	apply_ssbd 1, x22, x23
236
237#ifdef CONFIG_ARM64_PTR_AUTH
238alternative_if ARM64_HAS_ADDRESS_AUTH
239	/*
240	 * Enable IA for in-kernel PAC if the task had it disabled. Although
241	 * this could be implemented with an unconditional MRS which would avoid
242	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
243	 *
244	 * Install the kernel IA key only if IA was enabled in the task. If IA
245	 * was disabled on kernel exit then we would have left the kernel IA
246	 * installed so there is no need to install it again.
247	 */
248	ldr	x0, [tsk, THREAD_SCTLR_USER]
249	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
250	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
251	b	2f
2521:
253	mrs	x0, sctlr_el1
254	orr	x0, x0, SCTLR_ELx_ENIA
255	msr	sctlr_el1, x0
2562:
257	isb
258alternative_else_nop_endif
259#endif
260
261	mte_set_kernel_gcr x22, x23
262
263	scs_load tsk
264	.else
265	add	x21, sp, #PT_REGS_SIZE
266	get_current_task tsk
267	.endif /* \el == 0 */
268	mrs	x22, elr_el1
269	mrs	x23, spsr_el1
270	stp	lr, x21, [sp, #S_LR]
271
272	/*
273	 * For exceptions from EL0, create a final frame record.
274	 * For exceptions from EL1, create a synthetic frame record so the
275	 * interrupted code shows up in the backtrace.
276	 */
277	.if \el == 0
278	stp	xzr, xzr, [sp, #S_STACKFRAME]
279	.else
280	stp	x29, x22, [sp, #S_STACKFRAME]
281	.endif
282	add	x29, sp, #S_STACKFRAME
283
284#ifdef CONFIG_ARM64_SW_TTBR0_PAN
285alternative_if_not ARM64_HAS_PAN
286	bl	__swpan_entry_el\el
287alternative_else_nop_endif
288#endif
289
290	stp	x22, x23, [sp, #S_PC]
291
292	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
293	.if	\el == 0
294	mov	w21, #NO_SYSCALL
295	str	w21, [sp, #S_SYSCALLNO]
296	.endif
297
298	/* Save pmr */
299alternative_if ARM64_HAS_IRQ_PRIO_MASKING
300	mrs_s	x20, SYS_ICC_PMR_EL1
301	str	x20, [sp, #S_PMR_SAVE]
302	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
303	msr_s	SYS_ICC_PMR_EL1, x20
304alternative_else_nop_endif
305
306	/* Re-enable tag checking (TCO set on exception entry) */
307#ifdef CONFIG_ARM64_MTE
308alternative_if ARM64_MTE
309	SET_PSTATE_TCO(0)
310alternative_else_nop_endif
311#endif
312
313	/*
314	 * Registers that may be useful after this macro is invoked:
315	 *
316	 * x20 - ICC_PMR_EL1
317	 * x21 - aborted SP
318	 * x22 - aborted PC
319	 * x23 - aborted PSTATE
320	 */
321	.endm
322
323	.macro	kernel_exit, el
324	.if	\el != 0
325	disable_daif
326	.endif
327
328	/* Restore pmr */
329alternative_if ARM64_HAS_IRQ_PRIO_MASKING
330	ldr	x20, [sp, #S_PMR_SAVE]
331	msr_s	SYS_ICC_PMR_EL1, x20
332	mrs_s	x21, SYS_ICC_CTLR_EL1
333	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
334	dsb	sy				// Ensure priority change is seen by redistributor
335.L__skip_pmr_sync\@:
336alternative_else_nop_endif
337
338	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
339
340#ifdef CONFIG_ARM64_SW_TTBR0_PAN
341alternative_if_not ARM64_HAS_PAN
342	bl	__swpan_exit_el\el
343alternative_else_nop_endif
344#endif
345
346	.if	\el == 0
347	ldr	x23, [sp, #S_SP]		// load return stack pointer
348	msr	sp_el0, x23
349	tst	x22, #PSR_MODE32_BIT		// native task?
350	b.eq	3f
351
352#ifdef CONFIG_ARM64_ERRATUM_845719
353alternative_if ARM64_WORKAROUND_845719
354#ifdef CONFIG_PID_IN_CONTEXTIDR
355	mrs	x29, contextidr_el1
356	msr	contextidr_el1, x29
357#else
358	msr contextidr_el1, xzr
359#endif
360alternative_else_nop_endif
361#endif
3623:
363	scs_save tsk
364
365#ifdef CONFIG_ARM64_PTR_AUTH
366alternative_if ARM64_HAS_ADDRESS_AUTH
367	/*
368	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
369	 * alternatively install the user's IA. All other per-task keys and
370	 * SCTLR bits were updated on task switch.
371	 *
372	 * No kernel C function calls after this.
373	 */
374	ldr	x0, [tsk, THREAD_SCTLR_USER]
375	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
376	__ptrauth_keys_install_user tsk, x0, x1, x2
377	b	2f
3781:
379	mrs	x0, sctlr_el1
380	bic	x0, x0, SCTLR_ELx_ENIA
381	msr	sctlr_el1, x0
3822:
383alternative_else_nop_endif
384#endif
385
386	mte_set_user_gcr tsk, x0, x1
387
388	apply_ssbd 0, x0, x1
389	.endif
390
391	msr	elr_el1, x21			// set up the return data
392	msr	spsr_el1, x22
393	ldp	x0, x1, [sp, #16 * 0]
394	ldp	x2, x3, [sp, #16 * 1]
395	ldp	x4, x5, [sp, #16 * 2]
396	ldp	x6, x7, [sp, #16 * 3]
397	ldp	x8, x9, [sp, #16 * 4]
398	ldp	x10, x11, [sp, #16 * 5]
399	ldp	x12, x13, [sp, #16 * 6]
400	ldp	x14, x15, [sp, #16 * 7]
401	ldp	x16, x17, [sp, #16 * 8]
402	ldp	x18, x19, [sp, #16 * 9]
403	ldp	x20, x21, [sp, #16 * 10]
404	ldp	x22, x23, [sp, #16 * 11]
405	ldp	x24, x25, [sp, #16 * 12]
406	ldp	x26, x27, [sp, #16 * 13]
407	ldp	x28, x29, [sp, #16 * 14]
408	ldr	lr, [sp, #S_LR]
409	add	sp, sp, #PT_REGS_SIZE		// restore sp
410
411	.if	\el == 0
412alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
413#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
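	/*
	 * With KPTI the ERET above is patched to a NOP and we leave via the
	 * trampoline instead. The flags from the PSR_MODE32_BIT test above
	 * still select native vs. compat: the native path parks x30 in FAR_EL1
	 * (recovered in tramp_exit) so that x30 is free to hold the trampoline
	 * address.
	 */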
414	bne	4f
415	msr	far_el1, x30
416	tramp_alias	x30, tramp_exit_native
417	br	x30
4184:
419	tramp_alias	x30, tramp_exit_compat
420	br	x30
421#endif
422	.else
423	/* Ensure any device/NC reads complete */
424	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
425
426	eret
427	.endif
428	sb
429	.endm
430
431#ifdef CONFIG_ARM64_SW_TTBR0_PAN
432	/*
433	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
434	 * EL0, there is no need to check the state of TTBR0_EL1 since
435	 * accesses are always enabled.
436	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
437	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
438	 * user mappings.
439	 */
440SYM_CODE_START_LOCAL(__swpan_entry_el1)
441	mrs	x21, ttbr0_el1
442	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
443	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
444	b.eq	1f				// TTBR0 access already disabled
445	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
446SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
447	__uaccess_ttbr0_disable x21
4481:	ret
449SYM_CODE_END(__swpan_entry_el1)
450
451	/*
452	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
453	 * PAN bit checking.
454	 */
455SYM_CODE_START_LOCAL(__swpan_exit_el1)
456	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
457	__uaccess_ttbr0_enable x0, x1
4581:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
459	ret
460SYM_CODE_END(__swpan_exit_el1)
461
462SYM_CODE_START_LOCAL(__swpan_exit_el0)
463	__uaccess_ttbr0_enable x0, x1
464	/*
465	 * Enable errata workarounds only if returning to user. The only
466	 * workaround currently required for TTBR0_EL1 changes is for the
467	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
468	 * corruption).
469	 */
470	b	post_ttbr_update_workaround
471SYM_CODE_END(__swpan_exit_el0)
472#endif
473
474/* GPRs used by entry code */
475tsk	.req	x28		// current task_struct (thread_info is its first member)
476
477/*
478 * Interrupt handling.
479 */
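	/*
	 * With pseudo-NMI support, kernel-entry paths that are about to rely
	 * on PSTATE.I for masking write GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET to
	 * the PMR: the priority mask itself no longer blocks IRQs, and the
	 * PSR_I_SET marker records that PSTATE.I is what keeps them masked.
	 */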
480	.macro	gic_prio_kentry_setup, tmp:req
481#ifdef CONFIG_ARM64_PSEUDO_NMI
482	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
483	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
484	msr_s	SYS_ICC_PMR_EL1, \tmp
485	alternative_else_nop_endif
486#endif
487	.endm
488
489	.text
490
491/*
492 * Exception vectors.
493 */
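/*
 * Each kernel_ventry slot below is padded to 128 bytes (.align 7) and the
 * table itself is 2KiB aligned (.align 11), matching the architected vector
 * layout: four entries (Synchronous, IRQ, FIQ, SError) for each of EL1t,
 * EL1h, 64-bit EL0 and 32-bit EL0.
 */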
494	.pushsection ".entry.text", "ax"
495
496	.align	11
497SYM_CODE_START(vectors)
498	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
499	kernel_ventry	1, t, 64, irq		// IRQ EL1t
500	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
501	kernel_ventry	1, t, 64, error		// Error EL1t
502
503	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
504	kernel_ventry	1, h, 64, irq		// IRQ EL1h
505	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
506	kernel_ventry	1, h, 64, error		// Error EL1h
507
508	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
509	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
510	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
511	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
512
513	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
514	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
515	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
516	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
517SYM_CODE_END(vectors)
518
519#ifdef CONFIG_VMAP_STACK
520	/*
521	 * We detected an overflow in kernel_ventry, which switched to the
522	 * overflow stack. Stash the exception regs, and head to our overflow
523	 * handler.
524	 */
525__bad_stack:
526	/* Restore the original x0 value */
527	mrs	x0, tpidrro_el0
528
529	/*
530	 * Store the original GPRs to the new stack. The original SP (minus
531	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
532	 */
533	sub	sp, sp, #PT_REGS_SIZE
534	kernel_entry 1
535	mrs	x0, tpidr_el0
536	add	x0, x0, #PT_REGS_SIZE
537	str	x0, [sp, #S_SP]
538
539	/* Stash the regs for handle_bad_stack */
540	mov	x0, sp
541
542	/* Time to die */
543	bl	handle_bad_stack
544	ASM_BUG()
545#endif /* CONFIG_VMAP_STACK */
546
547
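	/*
	 * Generate an exception entry stub: save the register state with
	 * kernel_entry, call the matching C handler
	 * el<el><ht>_<regsize>_<label>_handler(regs) (e.g. el0t_64_sync_handler),
	 * and return through ret_to_user or ret_to_kernel.
	 */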
548	.macro entry_handler el:req, ht:req, regsize:req, label:req
549SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
550	kernel_entry \el, \regsize
551	mov	x0, sp
552	bl	el\el\ht\()_\regsize\()_\label\()_handler
553	.if \el == 0
554	b	ret_to_user
555	.else
556	b	ret_to_kernel
557	.endif
558SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
559	.endm
560
561/*
562 * Early exception handlers
563 */
564	entry_handler	1, t, 64, sync
565	entry_handler	1, t, 64, irq
566	entry_handler	1, t, 64, fiq
567	entry_handler	1, t, 64, error
568
569	entry_handler	1, h, 64, sync
570	entry_handler	1, h, 64, irq
571	entry_handler	1, h, 64, fiq
572	entry_handler	1, h, 64, error
573
574	entry_handler	0, t, 64, sync
575	entry_handler	0, t, 64, irq
576	entry_handler	0, t, 64, fiq
577	entry_handler	0, t, 64, error
578
579	entry_handler	0, t, 32, sync
580	entry_handler	0, t, 32, irq
581	entry_handler	0, t, 32, fiq
582	entry_handler	0, t, 32, error
583
584SYM_CODE_START_LOCAL(ret_to_kernel)
585	kernel_exit 1
586SYM_CODE_END(ret_to_kernel)
587
588/*
589 * "slow" syscall return path, also used by all other returns to userspace.
590 */
591SYM_CODE_START_LOCAL(ret_to_user)
592	disable_daif
593	gic_prio_kentry_setup tmp=x3
594#ifdef CONFIG_TRACE_IRQFLAGS
595	bl	trace_hardirqs_off
596#endif
597	ldr	x19, [tsk, #TSK_TI_FLAGS]
598	and	x2, x19, #_TIF_WORK_MASK
599	cbnz	x2, work_pending
600finish_ret_to_user:
601	user_enter_irqoff
602	/* Ignore asynchronous tag check faults in the uaccess routines */
603	clear_mte_async_tcf
604	enable_step_tsk x19, x2
605#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
606	bl	stackleak_erase
607#endif
608	kernel_exit 0
609
610/*
611 * Ok, we need to do extra processing; enter the slow path.
612 */
613work_pending:
614	mov	x0, sp				// 'regs'
615	mov	x1, x19
616	bl	do_notify_resume
617	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
618	b	finish_ret_to_user
619SYM_CODE_END(ret_to_user)
620
621	.popsection				// .entry.text
622
623#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
624/*
625 * Exception vectors trampoline.
626 */
627	.pushsection ".entry.tramp.text", "ax"
628
629	// Move from tramp_pg_dir to swapper_pg_dir
630	.macro tramp_map_kernel, tmp
631	mrs	\tmp, ttbr1_el1
632	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
633	bic	\tmp, \tmp, #USER_ASID_FLAG
634	msr	ttbr1_el1, \tmp
635#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
636alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
637	/* ASID already in \tmp[63:48] */
638	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
639	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
640	/* 2MB boundary containing the vectors, so we nobble the walk cache */
641	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
642	isb
643	tlbi	vae1, \tmp
644	dsb	nsh
645alternative_else_nop_endif
646#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
647	.endm
648
649	// Move from swapper_pg_dir to tramp_pg_dir
650	.macro tramp_unmap_kernel, tmp
651	mrs	\tmp, ttbr1_el1
652	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
653	orr	\tmp, \tmp, #USER_ASID_FLAG
654	msr	ttbr1_el1, \tmp
655	/*
656	 * We avoid running the post_ttbr_update_workaround here because
657	 * it's only needed by Cavium ThunderX, which requires KPTI to be
658	 * disabled.
659	 */
660	.endm
661
662	.macro tramp_ventry, regsize = 64
663	.align	7
6641:
665	.if	\regsize == 64
666	msr	tpidrro_el0, x30	// Restored in kernel_ventry
667	.endif
668	/*
669	 * Defend against branch aliasing attacks by pushing a dummy
670	 * entry onto the return stack and using a RET instruction to
671	 * enter the full-fat kernel vectors.
672	 */
673	bl	2f
674	b	.
6752:
676	tramp_map_kernel	x30
677#ifdef CONFIG_RANDOMIZE_BASE
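	/*
	 * With KASLR the real address of 'vectors' must not appear as a
	 * literal in this user-visible page, so it is loaded from the
	 * trampoline data page mapped just after the trampoline text
	 * (__entry_tramp_data_start).
	 */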
678	adr	x30, tramp_vectors + PAGE_SIZE
679alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
680	ldr	x30, [x30]
681#else
682	ldr	x30, =vectors
683#endif
684alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
685	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
686alternative_else_nop_endif
687	msr	vbar_el1, x30
688	add	x30, x30, #(1b - tramp_vectors)
689	isb
690	ret
691	.endm
692
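	/*
	 * Leave the kernel through the trampoline: point VBAR_EL1 back at the
	 * trampoline vectors, switch to tramp_pg_dir, recover x30 from FAR_EL1
	 * (stashed in kernel_exit) for native tasks, and ERET.
	 */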
693	.macro tramp_exit, regsize = 64
694	adr	x30, tramp_vectors
695	msr	vbar_el1, x30
696	tramp_unmap_kernel	x30
697	.if	\regsize == 64
698	mrs	x30, far_el1
699	.endif
700	eret
701	sb
702	.endm
703
704	.align	11
705SYM_CODE_START_NOALIGN(tramp_vectors)
706	.space	0x400
707
708	tramp_ventry
709	tramp_ventry
710	tramp_ventry
711	tramp_ventry
712
713	tramp_ventry	32
714	tramp_ventry	32
715	tramp_ventry	32
716	tramp_ventry	32
717SYM_CODE_END(tramp_vectors)
718
719SYM_CODE_START(tramp_exit_native)
720	tramp_exit
721SYM_CODE_END(tramp_exit_native)
722
723SYM_CODE_START(tramp_exit_compat)
724	tramp_exit	32
725SYM_CODE_END(tramp_exit_compat)
726
727	.ltorg
728	.popsection				// .entry.tramp.text
729#ifdef CONFIG_RANDOMIZE_BASE
730	.pushsection ".rodata", "a"
731	.align PAGE_SHIFT
732SYM_DATA_START(__entry_tramp_data_start)
733	.quad	vectors
734SYM_DATA_END(__entry_tramp_data_start)
735	.popsection				// .rodata
736#endif /* CONFIG_RANDOMIZE_BASE */
737#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
738
739/*
740 * Register switch for AArch64. The callee-saved registers need to be saved
741 * and restored. On entry:
742 *   x0 = previous task_struct (must be preserved across the switch)
743 *   x1 = next task_struct
744 * Previous and next are guaranteed not to be the same.
745 *
746 */
747SYM_FUNC_START(cpu_switch_to)
748	mov	x10, #THREAD_CPU_CONTEXT
749	add	x8, x0, x10
750	mov	x9, sp
751	stp	x19, x20, [x8], #16		// store callee-saved registers
752	stp	x21, x22, [x8], #16
753	stp	x23, x24, [x8], #16
754	stp	x25, x26, [x8], #16
755	stp	x27, x28, [x8], #16
756	stp	x29, x9, [x8], #16
757	str	lr, [x8]
758	add	x8, x1, x10
759	ldp	x19, x20, [x8], #16		// restore callee-saved registers
760	ldp	x21, x22, [x8], #16
761	ldp	x23, x24, [x8], #16
762	ldp	x25, x26, [x8], #16
763	ldp	x27, x28, [x8], #16
764	ldp	x29, x9, [x8], #16
765	ldr	lr, [x8]
766	mov	sp, x9
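	/* sp_el0 holds 'current' on arm64; point it at the next task. */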
767	msr	sp_el0, x1
768	ptrauth_keys_install_kernel x1, x8, x9, x10
769	scs_save x0
770	scs_load x1
771	ret
772SYM_FUNC_END(cpu_switch_to)
773NOKPROBE(cpu_switch_to)
774
775/*
776 * This is how we return from a fork.
777 */
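/*
 * copy_thread() seeds x19/x20 in the new task's cpu_context: for a kernel
 * thread x19 is the function to run and x20 its argument; for a user task
 * x19 is zero, so the call is skipped and we head straight to ret_to_user.
 */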
778SYM_CODE_START(ret_from_fork)
779	bl	schedule_tail
780	cbz	x19, 1f				// not a kernel thread
781	mov	x0, x20
782	blr	x19
7831:	get_current_task tsk
784	b	ret_to_user
785SYM_CODE_END(ret_from_fork)
786NOKPROBE(ret_from_fork)
787
788/*
789 * void call_on_irq_stack(struct pt_regs *regs,
790 * 		          void (*func)(struct pt_regs *));
791 *
792 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
793 */
794SYM_FUNC_START(call_on_irq_stack)
795#ifdef CONFIG_SHADOW_CALL_STACK
796	stp	scs_sp, xzr, [sp, #-16]!
797	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
798#endif
799	/* Create a frame record to save our LR and SP (implicit in FP) */
800	stp	x29, x30, [sp, #-16]!
801	mov	x29, sp
802
803	ldr_this_cpu x16, irq_stack_ptr, x17
804	mov	x15, #IRQ_STACK_SIZE
805	add	x16, x16, x15
806
807	/* Move to the new stack and call the function there */
808	mov	sp, x16
809	blr	x1
810
811	/*
812	 * Restore the SP from the FP, and restore the FP and LR from the frame
813	 * record.
814	 */
815	mov	sp, x29
816	ldp	x29, x30, [sp], #16
817#ifdef CONFIG_SHADOW_CALL_STACK
818	ldp	scs_sp, xzr, [sp], #16
819#endif
820	ret
821SYM_FUNC_END(call_on_irq_stack)
822NOKPROBE(call_on_irq_stack)
823
824#ifdef CONFIG_ARM_SDE_INTERFACE
825
826#include <asm/sdei.h>
827#include <uapi/linux/arm_sdei.h>
828
829.macro sdei_handler_exit exit_mode
830	/* On success, this call never returns... */
831	cmp	\exit_mode, #SDEI_EXIT_SMC
832	b.ne	99f
833	smc	#0
834	b	.
83599:	hvc	#0
836	b	.
837.endm
838
839#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
840/*
841 * The regular SDEI entry point may have been unmapped along with the rest of
842 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
843 * argument accessible.
844 *
845 * This clobbers x4; __sdei_handler() will restore it from firmware's
846 * copy.
847 */
848.ltorg
849.pushsection ".entry.tramp.text", "ax"
850SYM_CODE_START(__sdei_asm_entry_trampoline)
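	/*
	 * If TTBR1 still carries the user ASID the kernel is currently
	 * unmapped: map it, and record that fact (x4 == 0) so the exit
	 * trampoline knows to unmap it again.
	 */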
851	mrs	x4, ttbr1_el1
852	tbz	x4, #USER_ASID_BIT, 1f
853
854	tramp_map_kernel tmp=x4
855	isb
856	mov	x4, xzr
857
858	/*
859	 * Remember whether to unmap the kernel on exit.
860	 */
8611:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
862
863#ifdef CONFIG_RANDOMIZE_BASE
864	adr	x4, tramp_vectors + PAGE_SIZE
865	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
866	ldr	x4, [x4]
867#else
868	ldr	x4, =__sdei_asm_handler
869#endif
870	br	x4
871SYM_CODE_END(__sdei_asm_entry_trampoline)
872NOKPROBE(__sdei_asm_entry_trampoline)
873
874/*
875 * Make the exit call and restore the original ttbr1_el1
876 *
877 * x0 & x1: setup for the exit API call
878 * x2: exit_mode
879 * x4: struct sdei_registered_event argument from registration time.
880 */
881SYM_CODE_START(__sdei_asm_exit_trampoline)
882	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
883	cbnz	x4, 1f
884
885	tramp_unmap_kernel	tmp=x4
886
8871:	sdei_handler_exit exit_mode=x2
888SYM_CODE_END(__sdei_asm_exit_trampoline)
889NOKPROBE(__sdei_asm_exit_trampoline)
890	.ltorg
891.popsection		// .entry.tramp.text
892#ifdef CONFIG_RANDOMIZE_BASE
893.pushsection ".rodata", "a"
894SYM_DATA_START(__sdei_asm_trampoline_next_handler)
895	.quad	__sdei_asm_handler
896SYM_DATA_END(__sdei_asm_trampoline_next_handler)
897.popsection		// .rodata
898#endif /* CONFIG_RANDOMIZE_BASE */
899#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
900
901/*
902 * Software Delegated Exception entry point.
903 *
904 * x0: Event number
905 * x1: struct sdei_registered_event argument from registration time.
906 * x2: interrupted PC
907 * x3: interrupted PSTATE
908 * x4: maybe clobbered by the trampoline
909 *
910 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
911 * follow the SMC Calling Convention (SMCCC). We save (or retrieve) all the
912 * registers as the handler may want them.
913 */
914SYM_CODE_START(__sdei_asm_handler)
915	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
916	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
917	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
918	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
919	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
920	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
921	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
922	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
923	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
924	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
925	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
926	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
927	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
928	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
929	mov	x4, sp
930	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
931
932	mov	x19, x1
933
934#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
935	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
936#endif
937
938#ifdef CONFIG_VMAP_STACK
939	/*
940	 * entry.S may have been using sp as a scratch register; find whether
941	 * this is a normal or critical event and switch to the appropriate
942	 * stack for this CPU.
943	 */
944	cbnz	w4, 1f
945	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
946	b	2f
9471:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
9482:	mov	x6, #SDEI_STACK_SIZE
949	add	x5, x5, x6
950	mov	sp, x5
951#endif
952
953#ifdef CONFIG_SHADOW_CALL_STACK
954	/* Use a separate shadow call stack for normal and critical events */
955	cbnz	w4, 3f
956	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
957	b	4f
9583:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
9594:
960#endif
961
962	/*
963	 * We may have interrupted userspace or a guest, or the exit from or
964	 * return to either of them. We can't trust sp_el0, so restore it.
965	 */
966	mrs	x28, sp_el0
967	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
968	msr	sp_el0, x0
969
970	/* If we interrupted the kernel, point to the previous stack/frame. */
971	and     x0, x3, #0xc
972	mrs     x1, CurrentEL
973	cmp     x0, x1
974	csel	x29, x29, xzr, eq	// fp, or zero
975	csel	x4, x2, xzr, eq		// elr, or zero
976
977	stp	x29, x4, [sp, #-16]!
978	mov	x29, sp
979
980	add	x0, x19, #SDEI_EVENT_INTREGS
981	mov	x1, x19
982	bl	__sdei_handler
983
984	msr	sp_el0, x28
985	/* restore regs >x17 that we clobbered */
986	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
987	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
988	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
989	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
990	mov	sp, x1
991
992	mov	x1, x0			// address to complete_and_resume
993	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
994	cmp	x0, #1
995	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
996	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
997	csel	x0, x2, x3, ls
998
999	ldr_l	x2, sdei_exit_mode
1000
1001alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1002	sdei_handler_exit exit_mode=x2
1003alternative_else_nop_endif
1004
1005#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1006	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1007	br	x5
1008#endif
1009SYM_CODE_END(__sdei_asm_handler)
1010NOKPROBE(__sdei_asm_handler)
1011#endif /* CONFIG_ARM_SDE_INTERFACE */
1012