/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
	bl	enter_from_user_mode
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

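	/*
	 * Zero all GPRs on entry from EL0. This is presumably defensive:
	 * stale user-controlled register values cannot then be picked up by
	 * kernel code, including speculatively executed gadgets.
	 */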
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
alternative_else_nop_endif
	.endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm

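	/*
	 * Compute the trampoline alias of \sym: the .entry.tramp.text section
	 * is also mapped at the fixed virtual address TRAMP_VALIAS, so adding
	 * \sym's offset within the section yields its aliased address.
	 */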
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
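	// \state is forwarded to the ARM_SMCCC_ARCH_WORKAROUND_2 call: 1 asks
	// firmware to enable the SSB mitigation (on kernel entry), 0 disables
	// it again (on return to user).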
	.macro	apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	// Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	// when scheduling.
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	apply_ssbd 1, x22, x23

	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
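	// lr lands in pt_regs::regs[30]; x21 (the aborted SP) fills the
	// adjacent pt_regs::sp slot.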

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if_not ARM64_WORKAROUND_1418040
	b	4f
alternative_else_nop_endif
	/*
	 * if (x22.mode32 == cntkctl_el1.el0vcten)
	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
	 */
	mrs	x1, cntkctl_el1
	eon	x0, x1, x22, lsr #3
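	// lsr #3 lines PSR_MODE32_BIT (bit 4 of x22) up with EL0VCTEN (bit 1);
	// after the eon, bit 1 of x0 is set iff the two bits were equal.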
	tbz	x0, #1, 4f
	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
	msr	cntkctl_el1, x1
4:
#endif
	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	5f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
5:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	sb
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in the interrupted context.
	 * Otherwise set res to a non-zero value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif

	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

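	/*
	 * OR GIC_PRIO_PSR_I_SET into the interrupted context's priority so
	 * that normal IRQs are masked at the GIC while higher-priority
	 * pseudo-NMIs stay deliverable.
	 */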
	.macro	gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_enter
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * D, A and F were cleared at the start of handling. If anything is
	 * set in DAIF, we came back from an NMI, so skip preemption.
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * When using IRQ priority masking, we can get spurious interrupts while
	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
	 * section with interrupts disabled. Skip tracing in those cases.
	 */
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_exit
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbnz	x0, 1f
#endif
	bl	trace_hardirqs_on
1:
#endif

	kernel_exit 1
ENDPROC(el1_irq)

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
ENDPROC(el0_sync)

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
ENDPROC(el0_sync_compat)

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
ENDPROC(el0_irq_compat)

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked
ENDPROC(el0_error_compat)
#endif

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	gic_prio_irq_setup pmr=x20, tmp=x0
	ct_user_exit_irqoff
	enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
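	// x22 still holds the aborted PC from kernel_entry. Bit 55 is set for
	// TTBR1 (kernel) addresses, so an IRQ taken from EL0 at such a PC
	// suggests branch-predictor abuse; run the hardening callback.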
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x25, esr_el1
	gic_prio_kentry_setup tmp=x2
	ct_user_exit_irqoff
	enable_dbg
	mov	x0, sp
	mov	x1, x25
	bl	do_serror
	enable_da_f
	b	ret_to_user
ENDPROC(el0_error)

/*
 * Ok, we need to do extra processing; enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	gic_prio_kentry_setup tmp=x3
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0
ENDPROC(ret_to_user)

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

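	/*
	 * Move ttbr1 from the trampoline page tables to the kernel's
	 * swapper_pg_dir (which appears to sit PAGE_SIZE +
	 * RESERVED_TTBR0_SIZE after tramp_pg_dir in the linker layout), and
	 * drop USER_ASID_FLAG to select the kernel half of the ASID pair.
	 */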
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	sb
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400
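	// The 0x400 bytes above cover the eight EL1 vector slots (8 * 0x80),
	// which the trampoline never handles; only the EL0 entries below are
	// populated.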

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
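	// sp_el0 caches the current task_struct pointer; publishing 'next'
	// here is what makes get_current_task see the new task.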
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
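	// For kernel threads, copy_thread() staged the thread function in x19
	// and its argument in x20.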
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore it from the firmware copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use the event's interrupted_regs.addr_limit slot to remember
	 * whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.ltorg
.popsection		// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
__sdei_asm_trampoline_next_handler:
	.quad	__sdei_asm_handler
.popsection		// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 * follow SMCCC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
ENTRY(__sdei_asm_handler)
	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register; find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0; restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
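	// x3 holds the interrupted PSTATE; bits 3:2 encode the EL, the same
	// layout CurrentEL uses, so a direct compare works.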
	and     x0, x3, #0xc
	mrs     x1, CurrentEL
	cmp     x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */
