/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif
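
/*
 * A sketch of the resulting SVC stack layout implied by the comments
 * above (offsets illustrative; the real values come from asm-offsets.h):
 *
 *	sp + 0		syscall arg 5	\ the S_OFF bytes below
 *	sp + 4		syscall arg 6	/ the pt_regs frame
 *	sp + S_OFF	struct pt_regs: r0 first (S_R0 == 0), then
 *			r1-r12, sp, lr, pc, cpsr, old_r0
 */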

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0
	ldr	\rtmp1, \label
	ldr	\rtmp1, [\rtmp1]
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0
#endif
	.endm
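
	@ alignment_trap re-arms the alignment trap by reloading SCTLR
	@ (CP15 c1, c0, 0) from a saved control register value whenever the
	@ live copy has diverged. \label names a word holding a pointer to
	@ that saved value. A sketch of an invocation (the label below is
	@ hypothetical, not one used by the real callers):
	@
	@	alignment_trap r10, ip, .Lcr_ptr\@
	@ .Lcr_ptr\@:
	@	.word	saved_cr		@ hypothetical saved SCTLR copy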

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (8 words) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bits automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the CPU was in when the
	@ exception happened, that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #PT_REGS_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The CPU might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm
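
	/*
	 * A sketch of the pt_regs frame this macro leaves on SP_main,
	 * pieced together from the stores above (offset names assumed to
	 * follow struct pt_regs, as elsewhere in this file):
	 *
	 *	sp + S_R0..S_R11	r0-r11 (stmdb sp!, {r0-r11})
	 *	sp + S_IP		original r12, reloaded from the frame
	 *	sp + S_SP		recomputed pre-exception stack pointer
	 *	sp + S_LR, S_PC		original lr and return address
	 *	sp + S_PSR		saved xPSR
	 *	sp + S_OLD_R0		r0 again, for syscall restarting
	 */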

	/*
	 * PENDSV and SVCALL are configured to have the same exception
	 * priorities. As a kernel thread runs at SVCALL execution priority it
	 * can never be preempted and so we will never have to return to a
	 * kernel thread here.
	 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =exc_ret
	ldr	lr, [lr]

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. If the sp to be
	@ restored is not, align it here and set bit 9 of the saved xPSR so
	@ that the hardware undoes the adjustment on exception return.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
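
	/*
	 * For reference: the two stmdb pairs above rebuild the standard
	 * 8-word ARMv7-M hardware exception frame on the process stack,
	 * lowest address first: r0 (the return value when ret_r0 is set),
	 * r1, r2, r3, r12, lr, return address, xPSR. The core pops this
	 * frame automatically on the final bx lr.
	 */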
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
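
	@ The eor trick above works because USR and SYS modes share the same
	@ banked sp and lr, so flipping the mode bits from SVC to SYS exposes
	@ the user registers without dropping privilege. A sketch of a call,
	@ mirroring the load_user_sp_lr use further down in this file:
	@
	@	mov	r2, sp
	@	store_user_sp_lr r2, r3, S_SP	@ sp_usr -> S_SP, lr_usr -> S_SP + 4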


	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm
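
	@ The Thumb-2 path above deserves a note, since "ldmia sp, {r0 - pc}^"
	@ does not exist there: it stages {lr, pc, cpsr} just below the saved
	@ S_SP value with stmdb, reloads r0-r12 from pt_regs, points sp at the
	@ staged words, pops the calling lr, and finally lets "rfeia sp!"
	@ restore pc and cpsr in one exception-return step.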

	@
	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
	@
	@ This macro acts in a similar manner to svc_exit but switches to FIQ
	@ mode to restore the final part of the register state.
	@
	@ We cannot use the normal svc_exit procedure because that would
	@ clobber spsr_svc (FIQ could be delivered during the first few
	@ instructions of vector_swi meaning its contents have not been
	@ saved anywhere).
	@
	@ Note that, unlike svc_exit, this macro also does not allow a caller
	@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
	@ and the handlers cannot call into the scheduler (meaning the value
	@ on the stack remains correct).
	@
	.macro  svc_exit_via_fiq
	uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
			        @ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm


	.macro	restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
	@ The TLS register update is deferred until return to user space so we
	@ can use it for other things while running in the kernel
	mrc	p15, 0, r1, c13, c0, 3		@ get current_thread_info pointer
	ldr	r1, [r1, #TI_TP_VALUE]
	mcr	p15, 0, r1, c13, c0, 3		@ set TLS register
.L1_\@:
#endif

	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
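
	@ A sketch of how callers are expected to use this macro (parameters
	@ mirror, but are not copied from, the syscall return paths): \fast
	@ leaves r0 untouched so a syscall return value already in place
	@ survives, and \offset skips the S_OFF argument words:
	@
	@	restore_user_regs fast = 1, offset = S_OFF	@ fast syscall exit
	@	restore_user_regs fast = 0, offset = 0		@ full restore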

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm
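
	@ The save = 1 variant spills r0-r3, ip and lr around the C call
	@ because those are exactly the registers the AAPCS allows a callee
	@ to clobber; callers that have already saved them pass save = 0 to
	@ avoid the extra stack traffic.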

	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
	mov	\tmp, \nr
	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
	movcs	\tmp, #0
	csdb
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
#else
	cmp	\nr, #NR_syscalls		@ check upper syscall limit
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
#endif
	.endm
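
	@ Under CONFIG_CPU_SPECTRE an out-of-range \nr is clamped to 0
	@ (movcs) and csdb fences speculation, so a mispredicted bounds check
	@ cannot index past the end of the table. A sketch of an invocation
	@ using the aliases defined below (the return label is hypothetical):
	@
	@	invoke_syscall tbl, scno, r10, ret_to_user_example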

/*
 * These are the registers used in the syscall handler, and allow us,
 * in theory, to have up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info

	.macro	do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
	@
	@ Test whether the SP has overflowed. Task and IRQ stacks are aligned
	@ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
	@ zero.
	@
ARM(	tst	sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
THUMB(	tst	r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
THUMB(	it	ne						)
	bne	.Lstack_overflow_check\@

	.pushsection	.text
.Lstack_overflow_check\@:
	@
	@ The stack pointer is not pointing to a valid vmap'ed stack, but it
	@ may be pointing into the linear map instead, which may happen if we
	@ are already running from the overflow stack. We cannot detect overflow
	@ in such cases so just carry on.
	@
	str	ip, [r0, #12]			@ Stash IP on the mode stack
	ldr_va	ip, high_memory			@ Start of VMALLOC space
ARM(	cmp	sp, ip			)	@ SP in vmalloc space?
THUMB(	cmp	r1, ip			)
THUMB(	itt	lo			)
	ldrlo	ip, [r0, #12]			@ Restore IP
	blo	.Lout\@				@ Carry on

THUMB(	sub	r1, sp, r1		)	@ Restore original R1
THUMB(	sub	sp, r1			)	@ Restore original SP
	add	sp, sp, #\frame_size		@ Undo svc_entry's SP change
	b	__bad_stack			@ Handle VMAP stack overflow
	.popsection
.Lout\@:
#endif
	.endm
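
	@ A worked example of the overflow test, assuming THREAD_SIZE_ORDER = 1
	@ and PAGE_SHIFT = 12 (both vary by configuration): THREAD_SIZE is
	@ then 8 KiB and the tested bit is bit 13. Stacks are allocated so a
	@ valid SP always has that bit clear; once SP underflows below the
	@ stack base, bit 13 reads as 1 and the tst/bne pair diverts to the
	@ overflow check.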
469