xref: /openbmc/linux/arch/arm/kernel/entry-armv.S (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1d2912cb1SThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-only */
21da177e4SLinus Torvalds/*
31da177e4SLinus Torvalds *  linux/arch/arm/kernel/entry-armv.S
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds *  Copyright (C) 1996,1997,1998 Russell King.
61da177e4SLinus Torvalds *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
7afeb90caSHyok S. Choi *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
81da177e4SLinus Torvalds *
91da177e4SLinus Torvalds *  Low-level vector interface routines
101da177e4SLinus Torvalds *
1170b6f2b4SNicolas Pitre *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
1270b6f2b4SNicolas Pitre *  that causes it to save wrong values...  Be aware!
131da177e4SLinus Torvalds */
141da177e4SLinus Torvalds
159b9cf81aSPaul Gortmaker#include <linux/init.h>
169b9cf81aSPaul Gortmaker
176f6f6a70SRob Herring#include <asm/assembler.h>
18a9ff6961SLinus Walleij#include <asm/page.h>
19753790e7SRussell King#include <asm/glue-df.h>
20753790e7SRussell King#include <asm/glue-pf.h>
211da177e4SLinus Torvalds#include <asm/vfpmacros.h>
22d6551e88SRussell King#include <asm/thread_notify.h>
23c4c5716eSCatalin Marinas#include <asm/unwind.h>
24cc20d429SRussell King#include <asm/unistd.h>
25f159f4edSTony Lindgren#include <asm/tls.h>
269f97da78SDavid Howells#include <asm/system_info.h>
27747ffc2fSRussell King#include <asm/uaccess-asm.h>
28*ef21187cSLinus Walleij#include <asm/kasan_def.h>
291da177e4SLinus Torvalds
301da177e4SLinus Torvalds#include "entry-header.S"
31a0266c21SWang Nan#include <asm/probes.h>
321da177e4SLinus Torvalds
331da177e4SLinus Torvalds/*
34d9600c99SRussell King * Interrupt handling.
35187a51adSRussell King */
36d4664b6cSArd Biesheuvel	.macro	irq_handler, from_user:req
	@ Dispatch an IRQ on the per-CPU IRQ stack.
	@   \from_user - 1 if the IRQ was taken from user mode, 0 from SVC mode
	@ r1 = SP at entry (the pt_regs frame built by svc_entry/usr_entry),
	@ r2 = this CPU's irq_stack_ptr.
377a8ca84aSArd Biesheuvel	mov	r1, sp
387a8ca84aSArd Biesheuvel	ldr_this_cpu r2, irq_stack_ptr, r2, r3
39d4664b6cSArd Biesheuvel	.if	\from_user == 0
40d4664b6cSArd Biesheuvel	@
41d4664b6cSArd Biesheuvel	@ If we took the interrupt while running in the kernel, we may already
42d4664b6cSArd Biesheuvel	@ be using the IRQ stack, so revert to the original value in that case.
43d4664b6cSArd Biesheuvel	@
447a8ca84aSArd Biesheuvel	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
457a8ca84aSArd Biesheuvel	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
46a1c510d0SArd Biesheuvel#ifdef CONFIG_VMAP_STACK
477a8ca84aSArd Biesheuvel	ldr_va	r3, high_memory, cc	@ End of the linear region
487a8ca84aSArd Biesheuvel	cmpcc	r3, r1			@ Stack pointer was below it?
49abeb24aeSMarc Zyngier#endif
507a8ca84aSArd Biesheuvel	bcc	0f			@ If not, switch to the IRQ stack
	@ Already on the IRQ stack: call the handler directly with regs in r0.
517a8ca84aSArd Biesheuvel	mov	r0, r1
5252108641Seric miao	bl	generic_handle_arch_irq
537a8ca84aSArd Biesheuvel	b	1f
547a8ca84aSArd Biesheuvel0:
557a8ca84aSArd Biesheuvel	.endif
56d4664b6cSArd Biesheuvel
	@ Run generic_handle_arch_irq on the IRQ stack via call_with_stack
	@ (r0 = function, r1 = argument, r2 = new stack - see call_with_stack).
577a8ca84aSArd Biesheuvel	mov_l	r0, generic_handle_arch_irq
587a8ca84aSArd Biesheuvel	bl	call_with_stack
597a8ca84aSArd Biesheuvel1:
60187a51adSRussell King	.endm
61187a51adSRussell King
62ac8b9c1cSRussell King	.macro	pabt_helper
638dfe7ac9SRussell King	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
	@ MULTI_PABORT: several CPU types compiled in, so dispatch indirectly
	@ through the 'processor' vector; otherwise branch to the single
	@ compiled-in handler directly.
64ac8b9c1cSRussell King#ifdef MULTI_PABORT
6550807460SArd Biesheuvel	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC
6650807460SArd Biesheuvel	bl_r	ip
67ac8b9c1cSRussell King#else
68ac8b9c1cSRussell King	bl	CPU_PABORT_HANDLER
69ac8b9c1cSRussell King#endif
70ac8b9c1cSRussell King	.endm
71ac8b9c1cSRussell King
72ac8b9c1cSRussell King	.macro	dabt_helper
73ac8b9c1cSRussell King
74ac8b9c1cSRussell King	@
75ac8b9c1cSRussell King	@ Call the processor-specific abort handler:
76ac8b9c1cSRussell King	@
77da740472SRussell King	@  r2 - pt_regs
783e287becSRussell King	@  r4 - aborted context pc
793e287becSRussell King	@  r5 - aborted context psr
80ac8b9c1cSRussell King	@
81ac8b9c1cSRussell King	@ The abort handler must return the aborted address in r0, and
82ac8b9c1cSRussell King	@ the fault status register in r1.  r9 must be preserved.
83ac8b9c1cSRussell King	@
	@ As with pabt_helper: MULTI_DABORT means more than one CPU type is
	@ built in, so the handler is reached via the 'processor' vector.
84ac8b9c1cSRussell King#ifdef MULTI_DABORT
8550807460SArd Biesheuvel	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
8650807460SArd Biesheuvel	bl_r	ip
87ac8b9c1cSRussell King#else
88ac8b9c1cSRussell King	bl	CPU_DABORT_HANDLER
89ac8b9c1cSRussell King#endif
90ac8b9c1cSRussell King	.endm
91ac8b9c1cSRussell King
92c6089061SRussell King	.section	.entry.text,"ax",%progbits
93785d3cd2SNicolas Pitre
94187a51adSRussell King/*
951da177e4SLinus Torvalds * Invalid mode handlers
961da177e4SLinus Torvalds */
	@
	@ Minimal exception entry for a "cannot happen" mode: carve a pt_regs
	@ frame, save what we can, and pass the BAD_* \reason code to
	@ common_invalid in r1.
	@
97ccea7a19SRussell King	.macro	inv_entry, reason
985745eef6SRussell King	sub	sp, sp, #PT_REGS_SIZE
99b86040a5SCatalin Marinas ARM(	stmib	sp, {r1 - lr}		)
100b86040a5SCatalin Marinas THUMB(	stmia	sp, {r0 - r12}		)
101b86040a5SCatalin Marinas THUMB(	str	sp, [sp, #S_SP]		)
102b86040a5SCatalin Marinas THUMB(	str	lr, [sp, #S_LR]		)
1031da177e4SLinus Torvalds	mov	r1, #\reason
1041da177e4SLinus Torvalds	.endm
1051da177e4SLinus Torvalds
	@ Prefetch abort taken while in an invalid processor mode.
1061da177e4SLinus Torvalds__pabt_invalid:
107ccea7a19SRussell King	inv_entry BAD_PREFETCH
108ccea7a19SRussell King	b	common_invalid
10993ed3970SCatalin MarinasENDPROC(__pabt_invalid)
1101da177e4SLinus Torvalds
	@ Data abort taken while in an invalid processor mode.
1111da177e4SLinus Torvalds__dabt_invalid:
112ccea7a19SRussell King	inv_entry BAD_DATA
113ccea7a19SRussell King	b	common_invalid
11493ed3970SCatalin MarinasENDPROC(__dabt_invalid)
1151da177e4SLinus Torvalds
	@ IRQ taken while in an invalid processor mode.
1161da177e4SLinus Torvalds__irq_invalid:
117ccea7a19SRussell King	inv_entry BAD_IRQ
118ccea7a19SRussell King	b	common_invalid
11993ed3970SCatalin MarinasENDPROC(__irq_invalid)
1201da177e4SLinus Torvalds
	@ Undefined instruction taken while in an invalid processor mode.
1211da177e4SLinus Torvalds__und_invalid:
122ccea7a19SRussell King	inv_entry BAD_UNDEFINSTR
1231da177e4SLinus Torvalds
124ccea7a19SRussell King	@
125ccea7a19SRussell King	@ XXX fall through to common_invalid
126ccea7a19SRussell King	@
127ccea7a19SRussell King
128ccea7a19SRussell King@
129ccea7a19SRussell King@ common_invalid - generic code for failed exception (re-entrant version of handlers)
130ccea7a19SRussell King@
131ccea7a19SRussell Kingcommon_invalid:
132ccea7a19SRussell King	zero_fp
133ccea7a19SRussell King
	@ r0 points at the {r0, lr_<exc>, cpsr_<exc>} triple stashed on the
	@ exception-mode stack (presumably by the vector stub - confirm
	@ against vector_stub in the vectors code).
134ccea7a19SRussell King	ldmia	r0, {r4 - r6}
135ccea7a19SRussell King	add	r0, sp, #S_PC		@ here for interlock avoidance
136ccea7a19SRussell King	mov	r7, #-1			@  ""   ""    ""        ""
137ccea7a19SRussell King	str	r4, [sp]		@ save preserved r0
138ccea7a19SRussell King	stmia	r0, {r5 - r7}		@ lr_<exception>,
139ccea7a19SRussell King					@ cpsr_<exception>, "old_r0"
140ccea7a19SRussell King
	@ Hand the completed pt_regs (r0) and reason code (r1) to bad_mode().
1411da177e4SLinus Torvalds	mov	r0, sp
1421da177e4SLinus Torvalds	b	bad_mode
14393ed3970SCatalin MarinasENDPROC(__und_invalid)
1441da177e4SLinus Torvalds
1451da177e4SLinus Torvalds/*
1461da177e4SLinus Torvalds * SVC mode handlers
1471da177e4SLinus Torvalds */
1482dede2d8SNicolas Pitre
1492dede2d8SNicolas Pitre#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
1502dede2d8SNicolas Pitre#define SPFIX(code...) code
1512dede2d8SNicolas Pitre#else
1522dede2d8SNicolas Pitre#define SPFIX(code...)
1532dede2d8SNicolas Pitre#endif
1542dede2d8SNicolas Pitre
	@
	@ Common SVC-mode exception entry: build a pt_regs frame on the SVC
	@ stack (plus an optional \stack_hole gap below it) and fill it from
	@ the live registers and the r0/lr/spsr triple the vector stub left
	@ on the exception stack (pointed to by r0 on entry).
	@   \trace          - emit irq-tracing call on entry
	@   \uaccess        - run uaccess_entry bookkeeping
	@   \overflow_check - probe for kernel stack overflow (VMAP_STACK)
	@
155a1c510d0SArd Biesheuvel	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
156c4c5716eSCatalin Marinas UNWIND(.fnstart		)
157ae5cc07dSArd Biesheuvel	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
158a1c510d0SArd Biesheuvel THUMB(	add	sp, r1		)	@ get SP in a GPR without
159a1c510d0SArd Biesheuvel THUMB(	sub	r1, sp, r1	)	@ using a temp register
160a1c510d0SArd Biesheuvel
161a1c510d0SArd Biesheuvel	.if	\overflow_check
162c4c5716eSCatalin Marinas UNWIND(.save	{r0 - pc}	)
163a1c510d0SArd Biesheuvel	do_overflow_check (SVC_REGS_SIZE + \stack_hole)
164a1c510d0SArd Biesheuvel	.endif
165a1c510d0SArd Biesheuvel
	@ EABI requires sp 8-byte aligned; SPFIX() inserts a 4-byte pad when
	@ the pre-exception SP was only 4-byte aligned (see SPFIX above).
166b86040a5SCatalin Marinas#ifdef CONFIG_THUMB2_KERNEL
167ae5cc07dSArd Biesheuvel	tst	r1, #4			@ test stack pointer alignment
168ae5cc07dSArd Biesheuvel	sub	r1, sp, r1		@ restore original R1
169ae5cc07dSArd Biesheuvel	sub	sp, r1			@ restore original SP
170b86040a5SCatalin Marinas#else
1712dede2d8SNicolas Pitre SPFIX(	tst	sp, #4		)
172b86040a5SCatalin Marinas#endif
173ae5cc07dSArd Biesheuvel SPFIX(	subne	sp, sp, #4	)
174ae5cc07dSArd Biesheuvel
175ae5cc07dSArd Biesheuvel ARM(	stmib	sp, {r1 - r12}	)
176ae5cc07dSArd Biesheuvel THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2
177ccea7a19SRussell King
	@ r0 points at the saved {r0, lr_<exc>, spsr_<exc>} on the exception
	@ stack; load them into r3-r5.
178b059bdc3SRussell King	ldmia	r0, {r3 - r5}
179ae5cc07dSArd Biesheuvel	add	r7, sp, #S_SP		@ here for interlock avoidance
180b059bdc3SRussell King	mov	r6, #-1			@  ""  ""      ""       ""
181ae5cc07dSArd Biesheuvel	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
182ae5cc07dSArd Biesheuvel SPFIX(	addne	r2, r2, #4	)
183ae5cc07dSArd Biesheuvel	str	r3, [sp]		@ save the "real" r0 copied
184ccea7a19SRussell King					@ from the exception stack
185ccea7a19SRussell King
186b059bdc3SRussell King	mov	r3, lr
1871da177e4SLinus Torvalds
1881da177e4SLinus Torvalds	@
1891da177e4SLinus Torvalds	@ We are now ready to fill in the remaining blanks on the stack:
1901da177e4SLinus Torvalds	@
191b059bdc3SRussell King	@  r2 - sp_svc
192b059bdc3SRussell King	@  r3 - lr_svc
193b059bdc3SRussell King	@  r4 - lr_<exception>, already fixed up for correct return/restart
194b059bdc3SRussell King	@  r5 - spsr_<exception>
195b059bdc3SRussell King	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
1961da177e4SLinus Torvalds	@
197b059bdc3SRussell King	stmia	r7, {r2 - r6}
198f2741b78SRussell King
199e6978e4bSRussell King	get_thread_info tsk
200747ffc2fSRussell King	uaccess_entry tsk, r0, r1, r2, \uaccess
2012190fed6SRussell King
202c0e7f7eeSDaniel Thompson	.if \trace
203f2741b78SRussell King#ifdef CONFIG_TRACE_IRQFLAGS
204f2741b78SRussell King	bl	trace_hardirqs_off
205f2741b78SRussell King#endif
206c0e7f7eeSDaniel Thompson	.endif
2071da177e4SLinus Torvalds	.endm
2081da177e4SLinus Torvalds
2091da177e4SLinus Torvalds	.align	5
	@ Data abort taken from SVC mode: build pt_regs (uaccess state left
	@ untouched so the fault handler can see it), call the abort handler,
	@ then return to the interrupted kernel context.
2101da177e4SLinus Torvalds__dabt_svc:
2112190fed6SRussell King	svc_entry uaccess=0
2121da177e4SLinus Torvalds	mov	r2, sp
213da740472SRussell King	dabt_helper
214e16b31bfSMarc Zyngier THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
215b059bdc3SRussell King	svc_exit r5				@ return from exception
216c4c5716eSCatalin Marinas UNWIND(.fnend		)
21793ed3970SCatalin MarinasENDPROC(__dabt_svc)
2181da177e4SLinus Torvalds
2191da177e4SLinus Torvalds	.align	5
	@ IRQ taken from SVC mode: handle the interrupt, then optionally
	@ preempt if the preempt count is zero and TIF_NEED_RESCHED is set.
2201da177e4SLinus Torvalds__irq_svc:
221ccea7a19SRussell King	svc_entry
222d4664b6cSArd Biesheuvel	irq_handler from_user=0
2231613cc11SRussell King
224e7289c6dSThomas Gleixner#ifdef CONFIG_PREEMPTION
225706fdd9fSRussell King	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
226706fdd9fSRussell King	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
22728fab1a2SRussell King	teq	r8, #0				@ if preempt count != 0
22828fab1a2SRussell King	movne	r0, #0				@ force flags to 0
2291da177e4SLinus Torvalds	tst	r0, #_TIF_NEED_RESCHED
2301da177e4SLinus Torvalds	blne	svc_preempt
2311da177e4SLinus Torvalds#endif
23230891c90SRussell King
2329b56febeSRussell King	svc_exit r5, irq = 1			@ return from exception
233c4c5716eSCatalin Marinas UNWIND(.fnend		)
23493ed3970SCatalin MarinasENDPROC(__irq_svc)
2361da177e4SLinus Torvalds
2371da177e4SLinus Torvalds	.ltorg
2381da177e4SLinus Torvalds
239e7289c6dSThomas Gleixner#ifdef CONFIG_PREEMPTION
	@ Kernel preemption helper: loop on preempt_schedule_irq() until
	@ TIF_NEED_RESCHED is clear, then return to __irq_svc via saved lr (r8).
2401da177e4SLinus Torvaldssvc_preempt:
24128fab1a2SRussell King	mov	r8, lr
2421da177e4SLinus Torvalds1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
243706fdd9fSRussell King	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
2441da177e4SLinus Torvalds	tst	r0, #_TIF_NEED_RESCHED
2456ebbf2ceSRussell King	reteq	r8				@ go again
2461da177e4SLinus Torvalds	b	1b
2471da177e4SLinus Torvalds#endif
2481da177e4SLinus Torvalds
	@ Undefined-instruction fixup: rewind saved PC by r1 (4 for ARM,
	@ 2 for Thumb) so it points at the faulting instruction, then
	@ tail-call do_undefinstr() with pt_regs in r0.
24915ac49b6SRussell King__und_fault:
25015ac49b6SRussell King	@ Correct the PC such that it is pointing at the instruction
25115ac49b6SRussell King	@ which caused the fault.  If the faulting instruction was ARM
25215ac49b6SRussell King	@ the PC will be pointing at the next instruction, and have to
25315ac49b6SRussell King	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
25415ac49b6SRussell King	@ pointing at the second half of the Thumb instruction.  We
25515ac49b6SRussell King	@ have to subtract 2.
25615ac49b6SRussell King	ldr	r2, [r0, #S_PC]
25715ac49b6SRussell King	sub	r2, r2, r1
25815ac49b6SRussell King	str	r2, [r0, #S_PC]
25915ac49b6SRussell King	b	do_undefinstr
26015ac49b6SRussell KingENDPROC(__und_fault)
26115ac49b6SRussell King
2621da177e4SLinus Torvalds	.align	5
	@ Undefined instruction taken from SVC mode (e.g. kprobes, kernel
	@ FP/undef traps).  Sets the PC correction in r1 per instruction set
	@ and lets __und_fault/do_undefinstr sort it out.
2631da177e4SLinus Torvalds__und_svc:
264d30a0c8bSNicolas Pitre#ifdef CONFIG_KPROBES
265d30a0c8bSNicolas Pitre	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
266d30a0c8bSNicolas Pitre	@ it obviously needs free stack space which then will belong to
267d30a0c8bSNicolas Pitre	@ the saved context.
268a0266c21SWang Nan	svc_entry MAX_STACK_SIZE
269d30a0c8bSNicolas Pitre#else
270ccea7a19SRussell King	svc_entry
271d30a0c8bSNicolas Pitre#endif
2721da177e4SLinus Torvalds
27315ac49b6SRussell King	mov	r1, #4				@ PC correction to apply
274f77ac2e3SArd Biesheuvel THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
275f77ac2e3SArd Biesheuvel THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
2761da177e4SLinus Torvalds	mov	r0, sp				@ struct pt_regs *regs
27715ac49b6SRussell King	bl	__und_fault
2781da177e4SLinus Torvalds
27915ac49b6SRussell King__und_svc_finish:
28087eed3c7SRussell King	get_thread_info tsk
281b059bdc3SRussell King	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
282b059bdc3SRussell King	svc_exit r5				@ return from exception
283c4c5716eSCatalin Marinas UNWIND(.fnend		)
28493ed3970SCatalin MarinasENDPROC(__und_svc)
2851da177e4SLinus Torvalds
2861da177e4SLinus Torvalds	.align	5
	@ Prefetch abort taken from SVC mode.
2871da177e4SLinus Torvalds__pabt_svc:
288ccea7a19SRussell King	svc_entry
2894fb28474SKirill A. Shutemov	mov	r2, sp				@ regs
2908dfe7ac9SRussell King	pabt_helper
291b059bdc3SRussell King	svc_exit r5				@ return from exception
292c4c5716eSCatalin Marinas UNWIND(.fnend		)
29393ed3970SCatalin MarinasENDPROC(__pabt_svc)
2941da177e4SLinus Torvalds
2951da177e4SLinus Torvalds	.align	5
	@ FIQ taken from SVC mode, treated as an NMI: no irq tracing
	@ (trace=0), and a dedicated FIQ exit path.
296c0e7f7eeSDaniel Thompson__fiq_svc:
297c0e7f7eeSDaniel Thompson	svc_entry trace=0
298c0e7f7eeSDaniel Thompson	mov	r0, sp				@ struct pt_regs *regs
299c0e7f7eeSDaniel Thompson	bl	handle_fiq_as_nmi
300c0e7f7eeSDaniel Thompson	svc_exit_via_fiq
301c0e7f7eeSDaniel Thompson UNWIND(.fnend		)
302c0e7f7eeSDaniel ThompsonENDPROC(__fiq_svc)
303c0e7f7eeSDaniel Thompson
3041da177e4SLinus Torvalds/*
305c0e7f7eeSDaniel Thompson * Abort mode handlers
306c0e7f7eeSDaniel Thompson */
307c0e7f7eeSDaniel Thompson
308c0e7f7eeSDaniel Thompson@
309c0e7f7eeSDaniel Thompson@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
310c0e7f7eeSDaniel Thompson@ and reuses the same macros. However in abort mode we must also
311c0e7f7eeSDaniel Thompson@ save/restore lr_abt and spsr_abt to make nested aborts safe.
312c0e7f7eeSDaniel Thompson@
313c0e7f7eeSDaniel Thompson	.align 5
	@ FIQ taken from abort mode (see comment block above): mode-hop to
	@ ABT to stash lr_abt/spsr_abt, handle the FIQ in SVC mode, then
	@ restore the abort-mode banked state before returning.
314c0e7f7eeSDaniel Thompson__fiq_abt:
315c0e7f7eeSDaniel Thompson	svc_entry trace=0
316c0e7f7eeSDaniel Thompson
317c0e7f7eeSDaniel Thompson ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
318c0e7f7eeSDaniel Thompson THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
319c0e7f7eeSDaniel Thompson THUMB( msr	cpsr_c, r0 )
320c0e7f7eeSDaniel Thompson	mov	r1, lr		@ Save lr_abt
321c0e7f7eeSDaniel Thompson	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
322c0e7f7eeSDaniel Thompson ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
323c0e7f7eeSDaniel Thompson THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
324c0e7f7eeSDaniel Thompson THUMB( msr	cpsr_c, r0 )
325c0e7f7eeSDaniel Thompson	stmfd	sp!, {r1 - r2}
326c0e7f7eeSDaniel Thompson
	@ pt_regs sits above the two words just pushed, hence sp + 8.
327c0e7f7eeSDaniel Thompson	add	r0, sp, #8			@ struct pt_regs *regs
328c0e7f7eeSDaniel Thompson	bl	handle_fiq_as_nmi
329c0e7f7eeSDaniel Thompson
330c0e7f7eeSDaniel Thompson	ldmfd	sp!, {r1 - r2}
331c0e7f7eeSDaniel Thompson ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
332c0e7f7eeSDaniel Thompson THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
333c0e7f7eeSDaniel Thompson THUMB( msr	cpsr_c, r0 )
334c0e7f7eeSDaniel Thompson	mov	lr, r1		@ Restore lr_abt, abort is unsafe
335c0e7f7eeSDaniel Thompson	msr	spsr_cxsf, r2	@ Restore spsr_abt
336c0e7f7eeSDaniel Thompson ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
337c0e7f7eeSDaniel Thompson THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
338c0e7f7eeSDaniel Thompson THUMB( msr	cpsr_c, r0 )
339c0e7f7eeSDaniel Thompson
340c0e7f7eeSDaniel Thompson	svc_exit_via_fiq
341c0e7f7eeSDaniel Thompson UNWIND(.fnend		)
342c0e7f7eeSDaniel ThompsonENDPROC(__fiq_abt)
343c0e7f7eeSDaniel Thompson
344c0e7f7eeSDaniel Thompson/*
3451da177e4SLinus Torvalds * User mode handlers
3462dede2d8SNicolas Pitre *
3475745eef6SRussell King * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
3481da177e4SLinus Torvalds */
3492dede2d8SNicolas Pitre
3505745eef6SRussell King#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
3512dede2d8SNicolas Pitre#error "sizeof(struct pt_regs) must be a multiple of 8"
3522dede2d8SNicolas Pitre#endif
3532dede2d8SNicolas Pitre
	@
	@ Common user-mode exception entry: build a full pt_regs frame on the
	@ SVC stack from the live registers and the r0/lr/spsr triple the
	@ vector stub left on the exception stack (pointed to by r0), save
	@ sp_usr/lr_usr, and do alignment-trap / uaccess / current bookkeeping.
	@   \trace   - emit irq-tracing and context-tracking calls
	@   \uaccess - disable kernel access to user space on entry
	@
3542190fed6SRussell King	.macro	usr_entry, trace=1, uaccess=1
355c4c5716eSCatalin Marinas UNWIND(.fnstart	)
356c4c5716eSCatalin Marinas UNWIND(.cantunwind	)	@ don't unwind the user space
3575745eef6SRussell King	sub	sp, sp, #PT_REGS_SIZE
358b86040a5SCatalin Marinas ARM(	stmib	sp, {r1 - r12}	)
359b86040a5SCatalin Marinas THUMB(	stmia	sp, {r0 - r12}	)
360ccea7a19SRussell King
	@ ATRAP(): read current SCTLR (r7) and the saved cr_alignment (r8)
	@ for the alignment-trap re-enable below.
361195b58adSRussell King ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
36250807460SArd Biesheuvel ATRAP(	ldr_va	r8, cr_alignment)
363195b58adSRussell King
364b059bdc3SRussell King	ldmia	r0, {r3 - r5}
365ccea7a19SRussell King	add	r0, sp, #S_PC		@ here for interlock avoidance
366b059bdc3SRussell King	mov	r6, #-1			@  ""  ""     ""        ""
367ccea7a19SRussell King
368b059bdc3SRussell King	str	r3, [sp]		@ save the "real" r0 copied
369ccea7a19SRussell King					@ from the exception stack
3701da177e4SLinus Torvalds
3711da177e4SLinus Torvalds	@
3721da177e4SLinus Torvalds	@ We are now ready to fill in the remaining blanks on the stack:
3731da177e4SLinus Torvalds	@
374b059bdc3SRussell King	@  r4 - lr_<exception>, already fixed up for correct return/restart
375b059bdc3SRussell King	@  r5 - spsr_<exception>
376b059bdc3SRussell King	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
3771da177e4SLinus Torvalds	@
3781da177e4SLinus Torvalds	@ Also, separately save sp_usr and lr_usr
3791da177e4SLinus Torvalds	@
380b059bdc3SRussell King	stmia	r0, {r4 - r6}
381b86040a5SCatalin Marinas ARM(	stmdb	r0, {sp, lr}^			)
382b86040a5SCatalin Marinas THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
3831da177e4SLinus Torvalds
3842190fed6SRussell King	.if \uaccess
3852190fed6SRussell King	uaccess_disable ip
3862190fed6SRussell King	.endif
3872190fed6SRussell King
3881da177e4SLinus Torvalds	@ Enable the alignment trap while in kernel mode
389195b58adSRussell King ATRAP(	teq	r8, r7)
390195b58adSRussell King ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
3911da177e4SLinus Torvalds
39250596b75SArd Biesheuvel	reload_current r7, r8
39350596b75SArd Biesheuvel
3941da177e4SLinus Torvalds	@
3951da177e4SLinus Torvalds	@ Clear FP to mark the first stack frame
3961da177e4SLinus Torvalds	@
3971da177e4SLinus Torvalds	zero_fp
398f2741b78SRussell King
399c0e7f7eeSDaniel Thompson	.if	\trace
40011b8b25cSRussell King#ifdef CONFIG_TRACE_IRQFLAGS
401f2741b78SRussell King	bl	trace_hardirqs_off
402f2741b78SRussell King#endif
403b0088480SKevin Hilman	ct_user_exit save = 0
404c0e7f7eeSDaniel Thompson	.endif
4051da177e4SLinus Torvalds	.endm
4061da177e4SLinus Torvalds
407b49c0f24SNicolas Pitre	.macro	kuser_cmpxchg_check
408db695c05SRussell King#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
409b49c0f24SNicolas Pitre#ifndef CONFIG_MMU
410b49c0f24SNicolas Pitre#warning "NPTL on non MMU needs fixing"
411b49c0f24SNicolas Pitre#else
412b49c0f24SNicolas Pitre	@ Make sure our user space atomic helper is restarted
413b49c0f24SNicolas Pitre	@ if it was interrupted in a critical region.  Here we
414b49c0f24SNicolas Pitre	@ perform a quick test inline since it should be false
415b49c0f24SNicolas Pitre	@ 99.9999% of the time.  The rest is done out of line.
	@ r4 = lr_<exception>, i.e. the interrupted PC (see usr_entry);
	@ only addresses at/above TASK_SIZE (the helper page) need fixup.
416c12366baSLinus Walleij	ldr	r0, =TASK_SIZE
417c12366baSLinus Walleij	cmp	r4, r0
41840fb79c8SNicolas Pitre	blhs	kuser_cmpxchg64_fixup
419b49c0f24SNicolas Pitre#endif
420b49c0f24SNicolas Pitre#endif
421b49c0f24SNicolas Pitre	.endm
422b49c0f24SNicolas Pitre
4231da177e4SLinus Torvalds	.align	5
	@ Data abort taken from user mode.
4241da177e4SLinus Torvalds__dabt_usr:
4252190fed6SRussell King	usr_entry uaccess=0
426b49c0f24SNicolas Pitre	kuser_cmpxchg_check
4271da177e4SLinus Torvalds	mov	r2, sp
428da740472SRussell King	dabt_helper
429da740472SRussell King	b	ret_from_exception
430c4c5716eSCatalin Marinas UNWIND(.fnend		)
43193ed3970SCatalin MarinasENDPROC(__dabt_usr)
4321da177e4SLinus Torvalds
4331da177e4SLinus Torvalds	.align	5
	@ IRQ taken from user mode.
4341da177e4SLinus Torvalds__irq_usr:
435ccea7a19SRussell King	usr_entry
436bc089602SRussell King	kuser_cmpxchg_check
437d4664b6cSArd Biesheuvel	irq_handler from_user=1
4381613cc11SRussell King	get_thread_info tsk
4391da177e4SLinus Torvalds	mov	why, #0
4409fc2552aSMing Lei	b	ret_to_user_from_irq
441c4c5716eSCatalin Marinas UNWIND(.fnend		)
44293ed3970SCatalin MarinasENDPROC(__irq_usr)
4431da177e4SLinus Torvalds
4441da177e4SLinus Torvalds	.ltorg
4451da177e4SLinus Torvalds
4461da177e4SLinus Torvalds	.align	5
	@ Undefined instruction taken from user mode: try FP emulation
	@ (ARM mode, NWFPE only), otherwise deliver via __und_fault with the
	@ appropriate PC correction (2 for Thumb, 4 for ARM).
4471da177e4SLinus Torvalds__und_usr:
4482190fed6SRussell King	usr_entry uaccess=0
449bc089602SRussell King
4501417a6b8SCatalin Marinas	@ IRQs must be enabled before attempting to read the instruction from
4511417a6b8SCatalin Marinas	@ user space since that could cause a page/translation fault if the
4521417a6b8SCatalin Marinas	@ page table was modified by another CPU.
4531417a6b8SCatalin Marinas	enable_irq
4541417a6b8SCatalin Marinas
4558bcba70cSArd Biesheuvel	tst	r5, #PSR_T_BIT			@ Thumb mode?
4568bcba70cSArd Biesheuvel	mov	r1, #2				@ set insn size to 2 for Thumb
4578bcba70cSArd Biesheuvel	bne	0f				@ handle as Thumb undef exception
45847ba5f39SArd Biesheuvel#ifdef CONFIG_FPE_NWFPE
4598bcba70cSArd Biesheuvel	adr	r9, ret_from_exception
4608bcba70cSArd Biesheuvel	bl	call_fpe			@ returns via R9 on success
46147ba5f39SArd Biesheuvel#endif
4628bcba70cSArd Biesheuvel	mov	r1, #4				@ set insn size to 4 for ARM
4638bcba70cSArd Biesheuvel0:	mov	r0, sp
4642190fed6SRussell King	uaccess_disable ip
4658bcba70cSArd Biesheuvel	bl	__und_fault
4668bcba70cSArd Biesheuvel	b	ret_from_exception
467c4c5716eSCatalin Marinas UNWIND(.fnend)
46893ed3970SCatalin MarinasENDPROC(__und_usr)
469cb170a45SPaul Brook
4701da177e4SLinus Torvalds	.align	5
	@ Prefetch abort taken from user mode; falls through into the common
	@ user-mode exception return path below.
4711da177e4SLinus Torvalds__pabt_usr:
472ccea7a19SRussell King	usr_entry
4734fb28474SKirill A. Shutemov	mov	r2, sp				@ regs
4748dfe7ac9SRussell King	pabt_helper
475c4c5716eSCatalin Marinas UNWIND(.fnend		)
4761da177e4SLinus Torvalds	/* fall through */
4771da177e4SLinus Torvalds/*
4781da177e4SLinus Torvalds * This is the return code to user mode for abort handlers
4791da177e4SLinus Torvalds */
4801da177e4SLinus TorvaldsENTRY(ret_from_exception)
481c4c5716eSCatalin Marinas UNWIND(.fnstart	)
482c4c5716eSCatalin Marinas UNWIND(.cantunwind	)
4831da177e4SLinus Torvalds	get_thread_info tsk
4841da177e4SLinus Torvalds	mov	why, #0
4851da177e4SLinus Torvalds	b	ret_to_user
486c4c5716eSCatalin Marinas UNWIND(.fnend		)
48793ed3970SCatalin MarinasENDPROC(__pabt_usr)
48893ed3970SCatalin MarinasENDPROC(ret_from_exception)
4891da177e4SLinus Torvalds
490c0e7f7eeSDaniel Thompson	.align	5
	@ FIQ taken from user mode, treated as an NMI (no tracing).
491c0e7f7eeSDaniel Thompson__fiq_usr:
492c0e7f7eeSDaniel Thompson	usr_entry trace=0
493c0e7f7eeSDaniel Thompson	kuser_cmpxchg_check
494c0e7f7eeSDaniel Thompson	mov	r0, sp				@ struct pt_regs *regs
495c0e7f7eeSDaniel Thompson	bl	handle_fiq_as_nmi
496c0e7f7eeSDaniel Thompson	get_thread_info tsk
497c0e7f7eeSDaniel Thompson	restore_user_regs fast = 0, offset = 0
498c0e7f7eeSDaniel Thompson UNWIND(.fnend		)
499c0e7f7eeSDaniel ThompsonENDPROC(__fiq_usr)
500c0e7f7eeSDaniel Thompson
5011da177e4SLinus Torvalds/*
5021da177e4SLinus Torvalds * Register switch for ARMv3 and ARMv4 processors
5031da177e4SLinus Torvalds * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
5041da177e4SLinus Torvalds * previous and next are guaranteed not to be the same.
5051da177e4SLinus Torvalds */
5061da177e4SLinus TorvaldsENTRY(__switch_to)
507c4c5716eSCatalin Marinas UNWIND(.fnstart	)
508c4c5716eSCatalin Marinas UNWIND(.cantunwind	)
	@ Save prev's callee-saved context at prev_thread_info + TI_CPU_SAVE.
5091da177e4SLinus Torvalds	add	ip, r1, #TI_CPU_SAVE
510b86040a5SCatalin Marinas ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
511b86040a5SCatalin Marinas THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
512b86040a5SCatalin Marinas THUMB(	str	sp, [ip], #4		   )
513b86040a5SCatalin Marinas THUMB(	str	lr, [ip], #4		   )
	@ r4/r5 = next's TLS values (two words at TI_TP_VALUE).
514a4780adeSAndré Hentschel	ldr	r4, [r2, #TI_TP_VALUE]
515a4780adeSAndré Hentschel	ldr	r5, [r2, #TI_TP_VALUE + 4]
516247055aaSCatalin Marinas#ifdef CONFIG_CPU_USE_DOMAINS
5171eef5d2fSRussell King	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
5181eef5d2fSRussell King	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
519d6551e88SRussell King	ldr	r6, [r2, #TI_CPU_DOMAIN]
520afeb90caSHyok S. Choi#endif
521a4780adeSAndré Hentschel	switch_tls r1, r4, r5, r3, r7
	@ Non-per-task stack protector: fetch next's canary into r9 before the
	@ stack switch; it is stored to __stack_chk_guard further down.
522831a469bSArd Biesheuvel#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
523831a469bSArd Biesheuvel    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
524df0698beSNicolas Pitre	ldr	r8, =__stack_chk_guard
525ffa47aa6SArnd Bergmann	.if (TSK_STACK_CANARY > IMM12_MASK)
5269c46929eSArd Biesheuvel	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
527831a469bSArd Biesheuvel	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
5289c46929eSArd Biesheuvel	.else
5299c46929eSArd Biesheuvel	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
530ffa47aa6SArnd Bergmann	.endif
531df0698beSNicolas Pitre#endif
532831a469bSArd Biesheuvel	mov	r7, r2				@ Preserve 'next'
533247055aaSCatalin Marinas#ifdef CONFIG_CPU_USE_DOMAINS
5341da177e4SLinus Torvalds	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
535afeb90caSHyok S. Choi#endif
	@ Notify thread_notify_head of the switch; r5 preserves prev
	@ (the return value), r4 = &next's saved context.
536d6551e88SRussell King	mov	r5, r0
537d6551e88SRussell King	add	r4, r2, #TI_CPU_SAVE
538d6551e88SRussell King	ldr	r0, =thread_notify_head
539d6551e88SRussell King	mov	r1, #THREAD_NOTIFY_SWITCH
540d6551e88SRussell King	bl	atomic_notifier_call_chain
541831a469bSArd Biesheuvel#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
542831a469bSArd Biesheuvel    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
543831a469bSArd Biesheuvel	str	r9, [r8]
544df0698beSNicolas Pitre#endif
545d6551e88SRussell King	mov	r0, r5
546a1c510d0SArd Biesheuvel#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
5479c46929eSArd Biesheuvel	set_current r7, r8
548b832faecSArd Biesheuvel	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
549b832faecSArd Biesheuvel#else
550b832faecSArd Biesheuvel	mov	r1, r7
551b832faecSArd Biesheuvel	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
552a1c510d0SArd Biesheuvel#ifdef CONFIG_VMAP_STACK
553a1c510d0SArd Biesheuvel	@
554a1c510d0SArd Biesheuvel	@ Do a dummy read from the new stack while running from the old one so
555a1c510d0SArd Biesheuvel	@ that we can rely on do_translation_fault() to fix up any stale PMD
556a1c510d0SArd Biesheuvel	@ entries covering the vmalloc region.
557a1c510d0SArd Biesheuvel	@
558a1c510d0SArd Biesheuvel	ldr	r2, [ip]
559*ef21187cSLinus Walleij#ifdef CONFIG_KASAN_VMALLOC
560*ef21187cSLinus Walleij	@ Also dummy read from the KASAN shadow memory for the new stack if we
561*ef21187cSLinus Walleij	@ are using KASAN
562*ef21187cSLinus Walleij	mov_l	r2, KASAN_SHADOW_OFFSET
563*ef21187cSLinus Walleij	add	r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
564*ef21187cSLinus Walleij	ldr	r2, [r2]
565*ef21187cSLinus Walleij#endif
566a1c510d0SArd Biesheuvel#endif
567b832faecSArd Biesheuvel
568b832faecSArd Biesheuvel	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
569b832faecSArd Biesheuvel	@ effectuates the task switch, as that is what causes the observable
570b832faecSArd Biesheuvel	@ values of current and current_thread_info to change. When
571b832faecSArd Biesheuvel	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
572b832faecSArd Biesheuvel	@ current_thread_info) is done explicitly, and the update of SP just
573b832faecSArd Biesheuvel	@ switches us to another stack, with few other side effects. In order
574b832faecSArd Biesheuvel	@ to prevent this distinction from causing any inconsistencies, let's
575b832faecSArd Biesheuvel	@ keep the 'set_current' call as close as we can to the update of SP.
5769c46929eSArd Biesheuvel	set_current r1, r2
577b832faecSArd Biesheuvel	mov	sp, ip
578b832faecSArd Biesheuvel	ret	lr
579b832faecSArd Biesheuvel#endif
580c4c5716eSCatalin Marinas UNWIND(.fnend		)
58193ed3970SCatalin MarinasENDPROC(__switch_to)
5821da177e4SLinus Torvalds
583a1c510d0SArd Biesheuvel#ifdef CONFIG_VMAP_STACK
584a1c510d0SArd Biesheuvel	.text
585a1c510d0SArd Biesheuvel	.align	2
	@ Kernel stack overflow path (VMAP_STACK): switch to the per-CPU
	@ overflow stack, reconstruct a pt_regs + frame record for the
	@ unwinder, and die in handle_bad_stack().
586a1c510d0SArd Biesheuvel__bad_stack:
587a1c510d0SArd Biesheuvel	@
588a1c510d0SArd Biesheuvel	@ We've just detected an overflow. We need to load the address of this
589a1c510d0SArd Biesheuvel	@ CPU's overflow stack into the stack pointer register. We have only one
590a1c510d0SArd Biesheuvel	@ scratch register so let's use a sequence of ADDs including one
591a1c510d0SArd Biesheuvel	@ involving the PC, and decorate them with PC-relative group
592a1c510d0SArd Biesheuvel	@ relocations. As these are ARM only, switch to ARM mode first.
593a1c510d0SArd Biesheuvel	@
594a1c510d0SArd Biesheuvel	@ We enter here with IP clobbered and its value stashed on the mode
595a1c510d0SArd Biesheuvel	@ stack.
596a1c510d0SArd Biesheuvel	@
597a1c510d0SArd BiesheuvelTHUMB(	bx	pc		)
598a1c510d0SArd BiesheuvelTHUMB(	nop			)
599a1c510d0SArd BiesheuvelTHUMB(	.arm			)
6007b9896c3SArd Biesheuvel	ldr_this_cpu_armv6 ip, overflow_stack_ptr
601a1c510d0SArd Biesheuvel
602a1c510d0SArd Biesheuvel	str	sp, [ip, #-4]!			@ Preserve original SP value
603a1c510d0SArd Biesheuvel	mov	sp, ip				@ Switch to overflow stack
604a1c510d0SArd Biesheuvel	pop	{ip}				@ Original SP in IP
605a1c510d0SArd Biesheuvel
606a1c510d0SArd Biesheuvel#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
607a1c510d0SArd Biesheuvel	mov	ip, ip				@ mov expected by unwinder
608a1c510d0SArd Biesheuvel	push	{fp, ip, lr, pc}		@ GCC flavor frame record
609a1c510d0SArd Biesheuvel#else
610a1c510d0SArd Biesheuvel	str	ip, [sp, #-8]!			@ store original SP
611a1c510d0SArd Biesheuvel	push	{fpreg, lr}			@ Clang flavor frame record
612a1c510d0SArd Biesheuvel#endif
613a1c510d0SArd BiesheuvelUNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
614a1c510d0SArd BiesheuvelUNWIND( str	ip, [sp, #12]	)		@ store in the frame record
615a1c510d0SArd Biesheuvel	ldr	ip, [r0, #12]			@ reload IP
616a1c510d0SArd Biesheuvel
617a1c510d0SArd Biesheuvel	@ Store the original GPRs to the new stack.
	@ overflow_check=0: we already know the old stack overflowed.
618a1c510d0SArd Biesheuvel	svc_entry uaccess=0, overflow_check=0
619a1c510d0SArd Biesheuvel
620a1c510d0SArd BiesheuvelUNWIND( .save   {sp, pc}	)
621a1c510d0SArd BiesheuvelUNWIND( .save   {fpreg, lr}	)
622a1c510d0SArd BiesheuvelUNWIND( .setfp  fpreg, sp	)
623a1c510d0SArd Biesheuvel
624a1c510d0SArd Biesheuvel	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
625a1c510d0SArd Biesheuvel						@ to the linked list
626a1c510d0SArd Biesheuvel#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
627a1c510d0SArd Biesheuvel	ldr	r1, [fp, #4]			@ reload SP at entry
628a1c510d0SArd Biesheuvel	add	fp, fp, #12
629a1c510d0SArd Biesheuvel#else
630a1c510d0SArd Biesheuvel	ldr	r1, [fpreg, #8]
631a1c510d0SArd Biesheuvel#endif
632a1c510d0SArd Biesheuvel	str	r1, [sp, #S_SP]			@ store in pt_regs
633a1c510d0SArd Biesheuvel
634a1c510d0SArd Biesheuvel	@ Stash the regs for handle_bad_stack
635a1c510d0SArd Biesheuvel	mov	r0, sp
636a1c510d0SArd Biesheuvel
637a1c510d0SArd Biesheuvel	@ Time to die
638a1c510d0SArd Biesheuvel	bl	handle_bad_stack
639a1c510d0SArd Biesheuvel	nop
640a1c510d0SArd BiesheuvelUNWIND( .fnend			)
641a1c510d0SArd BiesheuvelENDPROC(__bad_stack)
642a1c510d0SArd Biesheuvel#endif
643a1c510d0SArd Biesheuvel
6441da177e4SLinus Torvalds	__INIT
6452d2669b6SNicolas Pitre
6462d2669b6SNicolas Pitre/*
6472d2669b6SNicolas Pitre * User helpers.
6482d2669b6SNicolas Pitre *
6492d2669b6SNicolas Pitre * Each segment is 32-byte aligned and will be moved to the top of the high
6502d2669b6SNicolas Pitre * vector page.  New segments (if ever needed) must be added in front of
6512d2669b6SNicolas Pitre * existing ones.  This mechanism should be used only for things that are
6522d2669b6SNicolas Pitre * really small and justified, and not be abused freely.
6532d2669b6SNicolas Pitre *
654e318b36eSJonathan Corbet * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
6552d2669b6SNicolas Pitre */
656b86040a5SCatalin Marinas THUMB(	.arm	)
6572d2669b6SNicolas Pitre
@ usr_ret: return to user space through \reg.
@ When the kernel can run Thumb user code (CONFIG_ARM_THUMB), use 'bx'
@ so bit 0 of the target address selects the instruction set; otherwise
@ a plain 'ret' (mov pc) is sufficient.
658ba9b5d76SNicolas Pitre	.macro	usr_ret, reg
659ba9b5d76SNicolas Pitre#ifdef CONFIG_ARM_THUMB
660ba9b5d76SNicolas Pitre	bx	\reg
661ba9b5d76SNicolas Pitre#else
6626ebbf2ceSRussell King	ret	\reg
663ba9b5d76SNicolas Pitre#endif
664ba9b5d76SNicolas Pitre	.endm
665ba9b5d76SNicolas Pitre
@ kuser_pad: pad the kuser helper starting at \sym out to exactly \size
@ bytes.  First zero-fill up to 4-byte alignment, then fill the remaining
@ words with 0xe7fddef1 (a permanently undefined ARM instruction) so a
@ stray branch into the padding traps instead of executing garbage.
@ This is what keeps each helper at its architecturally fixed 0xffff0fxx
@ entry address.
6665b43e7a3SRussell King	.macro	kuser_pad, sym, size
6675b43e7a3SRussell King	.if	(. - \sym) & 3
6685b43e7a3SRussell King	.rept	4 - (. - \sym) & 3
6695b43e7a3SRussell King	.byte	0
6705b43e7a3SRussell King	.endr
6715b43e7a3SRussell King	.endif
6725b43e7a3SRussell King	.rept	(\size - (. - \sym)) / 4
6735b43e7a3SRussell King	.word	0xe7fddef1
6745b43e7a3SRussell King	.endr
6755b43e7a3SRussell King	.endm
6765b43e7a3SRussell King
677f6f91b0dSRussell King#ifdef CONFIG_KUSER_HELPERS
6782d2669b6SNicolas Pitre	.align	5
6792d2669b6SNicolas Pitre	.globl	__kuser_helper_start
6802d2669b6SNicolas Pitre__kuser_helper_start:
6812d2669b6SNicolas Pitre
6822d2669b6SNicolas Pitre/*
68340fb79c8SNicolas Pitre * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
68440fb79c8SNicolas Pitre * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
6857c612bfdSNicolas Pitre */
6867c612bfdSNicolas Pitre
68740fb79c8SNicolas Pitre__kuser_cmpxchg64:				@ 0xffff0f60
@ Userspace contract: atomically store the 64-bit newval (*r1) to *r2
@ iff *r2 currently equals the 64-bit oldval (*r0).  Returns r0 == 0
@ with C set on success, r0 != 0 with C clear on failure (the final
@ 'rsbs r0, r3, #0' produces both the value and the flag).
68840fb79c8SNicolas Pitre
689db695c05SRussell King#if defined(CONFIG_CPU_32v6K)
69040fb79c8SNicolas Pitre
@ v6K+: true atomicity via exclusive 64-bit load/store, with barriers
@ on either side of the critical sequence.
69140fb79c8SNicolas Pitre	stmfd	sp!, {r4, r5, r6, r7}
69240fb79c8SNicolas Pitre	ldrd	r4, r5, [r0]			@ load old val
69340fb79c8SNicolas Pitre	ldrd	r6, r7, [r1]			@ load new val
69440fb79c8SNicolas Pitre	smp_dmb	arm
69540fb79c8SNicolas Pitre1:	ldrexd	r0, r1, [r2]			@ load current val
69640fb79c8SNicolas Pitre	eors	r3, r0, r4			@ compare with oldval (1)
697e44fc388SStefan Agner	eorseq	r3, r1, r5			@ compare with oldval (2)
69840fb79c8SNicolas Pitre	strexdeq r3, r6, r7, [r2]		@ store newval if eq
69940fb79c8SNicolas Pitre	teqeq	r3, #1				@ success?
70040fb79c8SNicolas Pitre	beq	1b				@ if no then retry
70140fb79c8SNicolas Pitre	smp_dmb	arm
70240fb79c8SNicolas Pitre	rsbs	r0, r3, #0			@ set returned val and C flag
70340fb79c8SNicolas Pitre	ldmfd	sp!, {r4, r5, r6, r7}
7045a97d0aeSWill Deacon	usr_ret	lr
70540fb79c8SNicolas Pitre
70640fb79c8SNicolas Pitre#elif !defined(CONFIG_SMP)
70740fb79c8SNicolas Pitre
70840fb79c8SNicolas Pitre#ifdef CONFIG_MMU
70940fb79c8SNicolas Pitre
71040fb79c8SNicolas Pitre	/*
71140fb79c8SNicolas Pitre	 * The only thing that can break atomicity in this cmpxchg64
71240fb79c8SNicolas Pitre	 * implementation is either an IRQ or a data abort exception
71340fb79c8SNicolas Pitre	 * causing another process/thread to be scheduled in the middle of
71440fb79c8SNicolas Pitre	 * the critical sequence.  The same strategy as for cmpxchg is used.
71540fb79c8SNicolas Pitre	 */
71640fb79c8SNicolas Pitre	stmfd	sp!, {r4, r5, r6, lr}
71740fb79c8SNicolas Pitre	ldmia	r0, {r4, r5}			@ load old val
71840fb79c8SNicolas Pitre	ldmia	r1, {r6, lr}			@ load new val
71940fb79c8SNicolas Pitre1:	ldmia	r2, {r0, r1}			@ load current val
72040fb79c8SNicolas Pitre	eors	r3, r0, r4			@ compare with oldval (1)
721e44fc388SStefan Agner	eorseq	r3, r1, r5			@ compare with oldval (2)
722e44fc388SStefan Agner2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
72340fb79c8SNicolas Pitre	rsbs	r0, r3, #0			@ set return val and C flag
72440fb79c8SNicolas Pitre	ldmfd	sp!, {r4, r5, r6, pc}
72540fb79c8SNicolas Pitre
72640fb79c8SNicolas Pitre	.text
@ Fixup run from exception context: if the interrupted user PC lies
@ inside the 1b..2b critical window above, rewind the saved pc_usr to
@ 1b so the whole sequence is re-executed on return to user space.
72740fb79c8SNicolas Pitrekuser_cmpxchg64_fixup:
72840fb79c8SNicolas Pitre	@ Called from kuser_cmpxchg_fixup.
7293ad55155SRussell King	@ r4 = address of interrupted insn (must be preserved).
73040fb79c8SNicolas Pitre	@ sp = saved regs. r7 and r8 are clobbered.
73140fb79c8SNicolas Pitre	@ 1b = first critical insn, 2b = last critical insn.
7323ad55155SRussell King	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
73340fb79c8SNicolas Pitre	mov	r7, #0xffff0fff
73440fb79c8SNicolas Pitre	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
7353ad55155SRussell King	subs	r8, r4, r7
736e44fc388SStefan Agner	rsbscs	r8, r8, #(2b - 1b)
73740fb79c8SNicolas Pitre	strcs	r7, [sp, #S_PC]
73840fb79c8SNicolas Pitre#if __LINUX_ARM_ARCH__ < 6
73940fb79c8SNicolas Pitre	bcc	kuser_cmpxchg32_fixup
74040fb79c8SNicolas Pitre#endif
7416ebbf2ceSRussell King	ret	lr
74240fb79c8SNicolas Pitre	.previous
74340fb79c8SNicolas Pitre
74440fb79c8SNicolas Pitre#else
74540fb79c8SNicolas Pitre#warning "NPTL on non MMU needs fixing"
74640fb79c8SNicolas Pitre	mov	r0, #-1
74740fb79c8SNicolas Pitre	adds	r0, r0, #0
74840fb79c8SNicolas Pitre	usr_ret	lr
74940fb79c8SNicolas Pitre#endif
75040fb79c8SNicolas Pitre
75140fb79c8SNicolas Pitre#else
75240fb79c8SNicolas Pitre#error "incoherent kernel configuration"
75340fb79c8SNicolas Pitre#endif
75440fb79c8SNicolas Pitre
@ Pad out to two 32-byte slots (see comment above about 0xffff0f80).
7555b43e7a3SRussell King	kuser_pad __kuser_cmpxchg64, 64
75640fb79c8SNicolas Pitre
@ Userspace memory barrier helper: issue a full data memory barrier and
@ return.  smp_dmb emits a real DMB only in SMP configurations (the ALT_*
@ machinery elsewhere patches it out on UP).  No registers are modified.
7577c612bfdSNicolas Pitre__kuser_memory_barrier:				@ 0xffff0fa0
758ed3768a8SDave Martin	smp_dmb	arm
759ba9b5d76SNicolas Pitre	usr_ret	lr
7607c612bfdSNicolas Pitre
7615b43e7a3SRussell King	kuser_pad __kuser_memory_barrier, 32
7627c612bfdSNicolas Pitre
@ Userspace contract: atomically store newval (r1) to *r2 iff *r2
@ currently equals oldval (r0).  Returns r0 == 0 with C set on success,
@ r0 != 0 with C clear on failure.
7632d2669b6SNicolas Pitre__kuser_cmpxchg:				@ 0xffff0fc0
7642d2669b6SNicolas Pitre
764db695c05SRussell King#if __LINUX_ARM_ARCH__ < 6
7652d2669b6SNicolas Pitre
76649bca4c2SNicolas Pitre#ifdef CONFIG_MMU
767b49c0f24SNicolas Pitre
768b49c0f24SNicolas Pitre	/*
769b49c0f24SNicolas Pitre	 * The only thing that can break atomicity in this cmpxchg
770b49c0f24SNicolas Pitre	 * implementation is either an IRQ or a data abort exception
771b49c0f24SNicolas Pitre	 * causing another process/thread to be scheduled in the middle
772b49c0f24SNicolas Pitre	 * of the critical sequence.  To prevent this, code is added to
773b49c0f24SNicolas Pitre	 * the IRQ and data abort exception handlers to set the pc back
774b49c0f24SNicolas Pitre	 * to the beginning of the critical section if it is found to be
775b49c0f24SNicolas Pitre	 * within that critical section (see kuser_cmpxchg_fixup).
776b49c0f24SNicolas Pitre	 */
777b49c0f24SNicolas Pitre1:	ldr	r3, [r2]			@ load current val
778b49c0f24SNicolas Pitre	subs	r3, r3, r0			@ compare with oldval
779b49c0f24SNicolas Pitre2:	streq	r1, [r2]			@ store newval if eq
780b49c0f24SNicolas Pitre	rsbs	r0, r3, #0			@ set return val and C flag
781b49c0f24SNicolas Pitre	usr_ret	lr
782b49c0f24SNicolas Pitre
783b49c0f24SNicolas Pitre	.text
@ Fixup run from exception context: if the interrupted user PC lies in
@ the 1b..2b window above, rewind the saved pc_usr to 1b so the load/
@ compare/store sequence restarts from scratch on return to user space.
78440fb79c8SNicolas Pitrekuser_cmpxchg32_fixup:
785b49c0f24SNicolas Pitre	@ Called from kuser_cmpxchg_check macro.
786b059bdc3SRussell King	@ r4 = address of interrupted insn (must be preserved).
787b49c0f24SNicolas Pitre	@ sp = saved regs. r7 and r8 are clobbered.
788b49c0f24SNicolas Pitre	@ 1b = first critical insn, 2b = last critical insn.
789b059bdc3SRussell King	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
790b49c0f24SNicolas Pitre	mov	r7, #0xffff0fff
791b49c0f24SNicolas Pitre	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
792b059bdc3SRussell King	subs	r8, r4, r7
793e44fc388SStefan Agner	rsbscs	r8, r8, #(2b - 1b)
794b49c0f24SNicolas Pitre	strcs	r7, [sp, #S_PC]
7956ebbf2ceSRussell King	ret	lr
796b49c0f24SNicolas Pitre	.previous
797b49c0f24SNicolas Pitre
79849bca4c2SNicolas Pitre#else
79949bca4c2SNicolas Pitre#warning "NPTL on non MMU needs fixing"
80049bca4c2SNicolas Pitre	mov	r0, #-1
80149bca4c2SNicolas Pitre	adds	r0, r0, #0
802ba9b5d76SNicolas Pitre	usr_ret	lr
803b49c0f24SNicolas Pitre#endif
8042d2669b6SNicolas Pitre
8052d2669b6SNicolas Pitre#else
8062d2669b6SNicolas Pitre
@ ARMv6+: ldrex/strex loop.  On SMP the trailing barrier is provided by
@ falling through into __kuser_memory_barrier (which also returns).
807ed3768a8SDave Martin	smp_dmb	arm
808b49c0f24SNicolas Pitre1:	ldrex	r3, [r2]
8092d2669b6SNicolas Pitre	subs	r3, r3, r0
8102d2669b6SNicolas Pitre	strexeq	r3, r1, [r2]
811b49c0f24SNicolas Pitre	teqeq	r3, #1
812b49c0f24SNicolas Pitre	beq	1b
8132d2669b6SNicolas Pitre	rsbs	r0, r3, #0
814b49c0f24SNicolas Pitre	/* beware -- each __kuser slot must be 8 instructions max */
815f00ec48fSRussell King	ALT_SMP(b	__kuser_memory_barrier)
816f00ec48fSRussell King	ALT_UP(usr_ret	lr)
8172d2669b6SNicolas Pitre
8182d2669b6SNicolas Pitre#endif
8192d2669b6SNicolas Pitre
8205b43e7a3SRussell King	kuser_pad __kuser_cmpxchg, 32
8222d2669b6SNicolas Pitre
@ Return the TLS value in r0.  By default the entry 'ldr' reads the
@ software TLS word at 0xffff0ff0; per the comment below, the code is
@ set up by kuser_get_tls_init (elsewhere) so that CPUs with a hardware
@ TLS register use the mrc at 0xffff0fe8 instead.  The exact 16/8-byte
@ offsets are therefore load-bearing -- do not reflow this helper.
8232d2669b6SNicolas Pitre__kuser_get_tls:				@ 0xffff0fe0
824f159f4edSTony Lindgren	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
825ba9b5d76SNicolas Pitre	usr_ret	lr
826f159f4edSTony Lindgren	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
8275b43e7a3SRussell King	kuser_pad __kuser_get_tls, 16
8285b43e7a3SRussell King	.rep	3
829f159f4edSTony Lindgren	.word	0			@ 0xffff0ff0 software TLS value, then
830f159f4edSTony Lindgren	.endr				@ pad up to __kuser_helper_version
8312d2669b6SNicolas Pitre
@ Number of 32-byte helper slots provided, readable by userspace.
8322d2669b6SNicolas Pitre__kuser_helper_version:				@ 0xffff0ffc
8332d2669b6SNicolas Pitre	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
8342d2669b6SNicolas Pitre
8352d2669b6SNicolas Pitre	.globl	__kuser_helper_end
8362d2669b6SNicolas Pitre__kuser_helper_end:
8372d2669b6SNicolas Pitre
838f6f91b0dSRussell King#endif
839f6f91b0dSRussell King
840b86040a5SCatalin Marinas THUMB(	.thumb	)
8412d2669b6SNicolas Pitre
8421da177e4SLinus Torvalds/*
8431da177e4SLinus Torvalds * Vector stubs.
8441da177e4SLinus Torvalds *
84519accfd3SRussell King * This code is copied to 0xffff1000 so we can use branches in the
84619accfd3SRussell King * vectors, rather than ldr's.  Note that this code must not exceed
84719accfd3SRussell King * a page size.
8481da177e4SLinus Torvalds *
8491da177e4SLinus Torvalds * Common stub entry macro:
8501da177e4SLinus Torvalds *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
851ccea7a19SRussell King *
852ccea7a19SRussell King * SP points to a minimal amount of processor-private memory, the address
853ccea7a19SRussell King * of which is copied into r0 for the mode specific abort handler.
8541da177e4SLinus Torvalds */
@ vector_stub: emit one exception vector stub, plus -- under
@ CONFIG_HARDEN_BRANCH_HISTORY -- the Spectre-BHB hardened entry points
@ vector_bhb_bpiall_\name (BPIALL) and vector_bhb_loop8_\name (8-iteration
@ branch loop), which rejoin the common body at .Lvec_\name.
@   \name       - stub suffix (irq, dabt, pabt, und, fiq)
@   \mode       - the CPU mode this exception is taken in
@   \correction - subtracted from lr_<exception> so the saved parent PC
@                 points at the interrupted instruction
@ The 16-entry handler table indexed by the parent mode bits (SPSR[3:0])
@ must immediately follow the macro invocation (label 1: below).
855b7ec4795SNicolas Pitre	.macro	vector_stub, name, mode, correction=0
8561da177e4SLinus Torvalds	.align	5
857c4f486f1SArd Biesheuvel#ifdef CONFIG_HARDEN_BRANCH_HISTORY
858c4f486f1SArd Biesheuvelvector_bhb_bpiall_\name:
859c4f486f1SArd Biesheuvel	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
860c4f486f1SArd Biesheuvel	@ isb not needed due to "movs pc, lr" in the vector stub
861c4f486f1SArd Biesheuvel	@ which gives a "context synchronisation".
862c4f486f1SArd Biesheuvel#endif
8631da177e4SLinus Torvalds
8641da177e4SLinus Torvaldsvector_\name:
8651da177e4SLinus Torvalds	.if \correction
8661da177e4SLinus Torvalds	sub	lr, lr, #\correction
8671da177e4SLinus Torvalds	.endif
8681da177e4SLinus Torvalds
869b9baf5c8SRussell King (Oracle)	@ Save r0, lr_<exception> (parent PC)
870ccea7a19SRussell King	stmia	sp, {r0, lr}		@ save r0, lr
871b9baf5c8SRussell King (Oracle)
872b9baf5c8SRussell King (Oracle)	@ Save spsr_<exception> (parent CPSR)
8731290c70dSArd Biesheuvel.Lvec_\name:
8741290c70dSArd Biesheuvel	mrs	lr, spsr
875ccea7a19SRussell King	str	lr, [sp, #8]		@ save spsr
876ccea7a19SRussell King
877ccea7a19SRussell King	@
878ccea7a19SRussell King	@ Prepare for SVC32 mode.  IRQs remain disabled.
879ccea7a19SRussell King	@
880ccea7a19SRussell King	mrs	r0, cpsr
881b86040a5SCatalin Marinas	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
882ccea7a19SRussell King	msr	spsr_cxsf, r0
883ccea7a19SRussell King
884ccea7a19SRussell King	@
885ccea7a19SRussell King	@ the branch table must immediately follow this code
886ccea7a19SRussell King	@
@ Index the table by the parent mode nibble; r0 carries the address of
@ the mode-private save area to the handler.  "movs pc, lr" both
@ branches and switches to SVC mode via the spsr set up above.
887ccea7a19SRussell King	and	lr, lr, #0x0f
888b86040a5SCatalin Marinas THUMB(	adr	r0, 1f			)
889b86040a5SCatalin Marinas THUMB(	ldr	lr, [r0, lr, lsl #2]	)
890b7ec4795SNicolas Pitre	mov	r0, sp
891b86040a5SCatalin Marinas ARM(	ldr	lr, [pc, lr, lsl #2]	)
892ccea7a19SRussell King	movs	pc, lr			@ branch to handler in SVC mode
89393ed3970SCatalin MarinasENDPROC(vector_\name)
89488987ef9SCatalin Marinas
895b9baf5c8SRussell King (Oracle)#ifdef CONFIG_HARDEN_BRANCH_HISTORY
896b9baf5c8SRussell King (Oracle)	.subsection 1
897b9baf5c8SRussell King (Oracle)	.align 5
898b9baf5c8SRussell King (Oracle)vector_bhb_loop8_\name:
899b9baf5c8SRussell King (Oracle)	.if \correction
900b9baf5c8SRussell King (Oracle)	sub	lr, lr, #\correction
901b9baf5c8SRussell King (Oracle)	.endif
902b9baf5c8SRussell King (Oracle)
903b9baf5c8SRussell King (Oracle)	@ Save r0, lr_<exception> (parent PC)
904b9baf5c8SRussell King (Oracle)	stmia	sp, {r0, lr}
905b9baf5c8SRussell King (Oracle)
906b9baf5c8SRussell King (Oracle)	@ bhb workaround
907b9baf5c8SRussell King (Oracle)	mov	r0, #8
9083cfb3019SArd Biesheuvel3:	W(b)	. + 4
909b9baf5c8SRussell King (Oracle)	subs	r0, r0, #1
9106c7cb60bSRussell King (Oracle)	bne	3b
911892c608aSArd Biesheuvel	dsb	nsh
912892c608aSArd Biesheuvel	@ isb not needed due to "movs pc, lr" in the vector stub
913892c608aSArd Biesheuvel	@ which gives a "context synchronisation".
9141290c70dSArd Biesheuvel	b	.Lvec_\name
915b9baf5c8SRussell King (Oracle)ENDPROC(vector_bhb_loop8_\name)
916b9baf5c8SRussell King (Oracle)	.previous
917b9baf5c8SRussell King (Oracle)#endif
918b9baf5c8SRussell King (Oracle)
91988987ef9SCatalin Marinas	.align	2
92088987ef9SCatalin Marinas	@ handler addresses follow this label
92188987ef9SCatalin Marinas1:
9221da177e4SLinus Torvalds	.endm
9231da177e4SLinus Torvalds
924b9b32bf7SRussell King	.section .stubs, "ax", %progbits
925ad12c2f1SArd Biesheuvel	@ These need to remain at the start of the section so that
926ad12c2f1SArd Biesheuvel	@ they are in range of the 'SWI' entries in the vector tables
927ad12c2f1SArd Biesheuvel	@ located 4k down.
@ Literal pool words holding the vector_swi handler address(es); the
@ 'ldr pc, .' entries in the .vectors tables are relocated to load
@ these (see the .reloc directives further down).
928ad12c2f1SArd Biesheuvel.L__vector_swi:
92919accfd3SRussell King	.word	vector_swi
930b9baf5c8SRussell King (Oracle)#ifdef CONFIG_HARDEN_BRANCH_HISTORY
931ad12c2f1SArd Biesheuvel.L__vector_bhb_loop8_swi:
932b9baf5c8SRussell King (Oracle)	.word	vector_bhb_loop8_swi
933ad12c2f1SArd Biesheuvel.L__vector_bhb_bpiall_swi:
934b9baf5c8SRussell King (Oracle)	.word	vector_bhb_bpiall_swi
935b9baf5c8SRussell King (Oracle)#endif
93619accfd3SRussell King
@ Reset vector: should never be reached via the vector page; raise
@ SYS_ERROR0 then fall into the undefined-instruction path.
93719accfd3SRussell Kingvector_rst:
93819accfd3SRussell King ARM(	swi	SYS_ERROR0	)
93919accfd3SRussell King THUMB(	svc	#0		)
94019accfd3SRussell King THUMB(	nop			)
94119accfd3SRussell King	b	vector_und
94219accfd3SRussell King
9431da177e4SLinus Torvalds/*
9441da177e4SLinus Torvalds * Interrupt dispatcher
9451da177e4SLinus Torvalds */
946b7ec4795SNicolas Pitre	vector_stub	irq, IRQ_MODE, 4
@ Handler table indexed by the interrupted context's mode (SPSR[3:0]);
@ only USR (0) and SVC (3) are legitimate sources of an IRQ here.
9471da177e4SLinus Torvalds
9481da177e4SLinus Torvalds	.long	__irq_usr			@  0  (USR_26 / USR_32)
9491da177e4SLinus Torvalds	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
9501da177e4SLinus Torvalds	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
9511da177e4SLinus Torvalds	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
9521da177e4SLinus Torvalds	.long	__irq_invalid			@  4
9531da177e4SLinus Torvalds	.long	__irq_invalid			@  5
9541da177e4SLinus Torvalds	.long	__irq_invalid			@  6
9551da177e4SLinus Torvalds	.long	__irq_invalid			@  7
9561da177e4SLinus Torvalds	.long	__irq_invalid			@  8
9571da177e4SLinus Torvalds	.long	__irq_invalid			@  9
9581da177e4SLinus Torvalds	.long	__irq_invalid			@  a
9591da177e4SLinus Torvalds	.long	__irq_invalid			@  b
9601da177e4SLinus Torvalds	.long	__irq_invalid			@  c
9611da177e4SLinus Torvalds	.long	__irq_invalid			@  d
9621da177e4SLinus Torvalds	.long	__irq_invalid			@  e
9631da177e4SLinus Torvalds	.long	__irq_invalid			@  f
9641da177e4SLinus Torvalds
9651da177e4SLinus Torvalds/*
9661da177e4SLinus Torvalds * Data abort dispatcher
9671da177e4SLinus Torvalds * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
9681da177e4SLinus Torvalds */
969b7ec4795SNicolas Pitre	vector_stub	dabt, ABT_MODE, 8
@ Handler table indexed by the interrupted context's mode (SPSR[3:0]);
@ correction 8 makes the saved PC point at the faulting instruction.
9701da177e4SLinus Torvalds
9711da177e4SLinus Torvalds	.long	__dabt_usr			@  0  (USR_26 / USR_32)
9721da177e4SLinus Torvalds	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
9731da177e4SLinus Torvalds	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
9741da177e4SLinus Torvalds	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
9751da177e4SLinus Torvalds	.long	__dabt_invalid			@  4
9761da177e4SLinus Torvalds	.long	__dabt_invalid			@  5
9771da177e4SLinus Torvalds	.long	__dabt_invalid			@  6
9781da177e4SLinus Torvalds	.long	__dabt_invalid			@  7
9791da177e4SLinus Torvalds	.long	__dabt_invalid			@  8
9801da177e4SLinus Torvalds	.long	__dabt_invalid			@  9
9811da177e4SLinus Torvalds	.long	__dabt_invalid			@  a
9821da177e4SLinus Torvalds	.long	__dabt_invalid			@  b
9831da177e4SLinus Torvalds	.long	__dabt_invalid			@  c
9841da177e4SLinus Torvalds	.long	__dabt_invalid			@  d
9851da177e4SLinus Torvalds	.long	__dabt_invalid			@  e
9861da177e4SLinus Torvalds	.long	__dabt_invalid			@  f
9871da177e4SLinus Torvalds
9881da177e4SLinus Torvalds/*
9891da177e4SLinus Torvalds * Prefetch abort dispatcher
9901da177e4SLinus Torvalds * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
9911da177e4SLinus Torvalds */
992b7ec4795SNicolas Pitre	vector_stub	pabt, ABT_MODE, 4
@ Handler table indexed by the interrupted context's mode (SPSR[3:0]).
9931da177e4SLinus Torvalds
9941da177e4SLinus Torvalds	.long	__pabt_usr			@  0 (USR_26 / USR_32)
9951da177e4SLinus Torvalds	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
9961da177e4SLinus Torvalds	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
9971da177e4SLinus Torvalds	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
9981da177e4SLinus Torvalds	.long	__pabt_invalid			@  4
9991da177e4SLinus Torvalds	.long	__pabt_invalid			@  5
10001da177e4SLinus Torvalds	.long	__pabt_invalid			@  6
10011da177e4SLinus Torvalds	.long	__pabt_invalid			@  7
10021da177e4SLinus Torvalds	.long	__pabt_invalid			@  8
10031da177e4SLinus Torvalds	.long	__pabt_invalid			@  9
10041da177e4SLinus Torvalds	.long	__pabt_invalid			@  a
10051da177e4SLinus Torvalds	.long	__pabt_invalid			@  b
10061da177e4SLinus Torvalds	.long	__pabt_invalid			@  c
10071da177e4SLinus Torvalds	.long	__pabt_invalid			@  d
10081da177e4SLinus Torvalds	.long	__pabt_invalid			@  e
10091da177e4SLinus Torvalds	.long	__pabt_invalid			@  f
10101da177e4SLinus Torvalds
10111da177e4SLinus Torvalds/*
10121da177e4SLinus Torvalds * Undef instr entry dispatcher
10131da177e4SLinus Torvalds * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
10141da177e4SLinus Torvalds */
1015b7ec4795SNicolas Pitre	vector_stub	und, UND_MODE
@ Handler table indexed by the interrupted context's mode (SPSR[3:0]);
@ no lr correction -- the saved PC already follows the undefined insn.
10161da177e4SLinus Torvalds
10171da177e4SLinus Torvalds	.long	__und_usr			@  0 (USR_26 / USR_32)
10181da177e4SLinus Torvalds	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
10191da177e4SLinus Torvalds	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
10201da177e4SLinus Torvalds	.long	__und_svc			@  3 (SVC_26 / SVC_32)
10211da177e4SLinus Torvalds	.long	__und_invalid			@  4
10221da177e4SLinus Torvalds	.long	__und_invalid			@  5
10231da177e4SLinus Torvalds	.long	__und_invalid			@  6
10241da177e4SLinus Torvalds	.long	__und_invalid			@  7
10251da177e4SLinus Torvalds	.long	__und_invalid			@  8
10261da177e4SLinus Torvalds	.long	__und_invalid			@  9
10271da177e4SLinus Torvalds	.long	__und_invalid			@  a
10281da177e4SLinus Torvalds	.long	__und_invalid			@  b
10291da177e4SLinus Torvalds	.long	__und_invalid			@  c
10301da177e4SLinus Torvalds	.long	__und_invalid			@  d
10311da177e4SLinus Torvalds	.long	__und_invalid			@  e
10321da177e4SLinus Torvalds	.long	__und_invalid			@  f
10331da177e4SLinus Torvalds
10341da177e4SLinus Torvalds	.align	5
10351da177e4SLinus Torvalds
10361da177e4SLinus Torvalds/*=============================================================================
103719accfd3SRussell King * Address exception handler
103819accfd3SRussell King *-----------------------------------------------------------------------------
103919accfd3SRussell King * These aren't too critical.
104019accfd3SRussell King * (they're not supposed to happen, and won't happen in 32-bit data mode).
104119accfd3SRussell King */
104219accfd3SRussell King
@ Deliberate infinite loop: address exceptions cannot occur in 32-bit
@ data mode (see the comment above), so simply hang if one ever fires.
104319accfd3SRussell Kingvector_addrexcptn:
104419accfd3SRussell King	b	vector_addrexcptn
104519accfd3SRussell King
104619accfd3SRussell King/*=============================================================================
1047c0e7f7eeSDaniel Thompson * FIQ "NMI" handler
10481da177e4SLinus Torvalds *-----------------------------------------------------------------------------
1049c0e7f7eeSDaniel Thompson * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
1050b9baf5c8SRussell King (Oracle) * systems. This must be the last vector stub, so lets place it in its own
1051b9baf5c8SRussell King (Oracle) * subsection.
10521da177e4SLinus Torvalds */
1053b9baf5c8SRussell King (Oracle)	.subsection 2
1054c0e7f7eeSDaniel Thompson	vector_stub	fiq, FIQ_MODE, 4
@ Handler table indexed by the interrupted context's mode (SPSR[3:0]).
@ Unlike the other exceptions, a FIQ can interrupt almost any mode, so
@ nearly every entry routes to __fiq_svc; an interrupted abort handler
@ (mode 7) gets the dedicated __fiq_abt path.
1055c0e7f7eeSDaniel Thompson
1056c0e7f7eeSDaniel Thompson	.long	__fiq_usr			@  0  (USR_26 / USR_32)
1057c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
1058c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
1059c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
1060c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  4
1061c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  5
1062c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  6
1063c0e7f7eeSDaniel Thompson	.long	__fiq_abt			@  7
1064c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  8
1065c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  9
1066c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  a
1067c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  b
1068c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  c
1069c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  d
1070c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  e
1071c0e7f7eeSDaniel Thompson	.long	__fiq_svc			@  f
10721da177e4SLinus Torvalds
107331b96caeSArd Biesheuvel	.globl	vector_fiq
1074e39e3f3eSRussell King
@ The 8-entry hardware exception vector table (copied to the vector
@ page).  All entries are direct branches except SWI, which is a
@ PC-relative load of vector_swi's address from the .L__vector_swi
@ literal kept at the start of .stubs, 4k away; the explicit .reloc
@ retargets the 'ldr pc, .' to that literal.
1075b9b32bf7SRussell King	.section .vectors, "ax", %progbits
1076b9b32bf7SRussell King	W(b)	vector_rst
1077b9b32bf7SRussell King	W(b)	vector_und
1078ad12c2f1SArd BiesheuvelARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
1079ad12c2f1SArd BiesheuvelTHUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
1080ad12c2f1SArd Biesheuvel	W(ldr)	pc, .
1081b9b32bf7SRussell King	W(b)	vector_pabt
1082b9b32bf7SRussell King	W(b)	vector_dabt
1083b9b32bf7SRussell King	W(b)	vector_addrexcptn
1084b9b32bf7SRussell King	W(b)	vector_irq
1085b9b32bf7SRussell King	W(b)	vector_fiq
10861da177e4SLinus Torvalds
1087b9baf5c8SRussell King (Oracle)#ifdef CONFIG_HARDEN_BRANCH_HISTORY
@ Alternative vector tables for the Spectre-BHB mitigations.  Same
@ layout as .vectors above, but each entry branches to the hardened
@ stub variant emitted by vector_stub (loop8 or BPIALL flavour); the
@ SWI entries load from the matching literals in .stubs.
1088b9baf5c8SRussell King (Oracle)	.section .vectors.bhb.loop8, "ax", %progbits
1089b9baf5c8SRussell King (Oracle)	W(b)	vector_rst
1090b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_loop8_und
1091ad12c2f1SArd BiesheuvelARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
1092ad12c2f1SArd BiesheuvelTHUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
1093ad12c2f1SArd Biesheuvel	W(ldr)	pc, .
1094b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_loop8_pabt
1095b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_loop8_dabt
1096b9baf5c8SRussell King (Oracle)	W(b)	vector_addrexcptn
1097b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_loop8_irq
1098b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_loop8_fiq
1099b9baf5c8SRussell King (Oracle)
1100b9baf5c8SRussell King (Oracle)	.section .vectors.bhb.bpiall, "ax", %progbits
1101b9baf5c8SRussell King (Oracle)	W(b)	vector_rst
1102b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_bpiall_und
1103ad12c2f1SArd BiesheuvelARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
1104ad12c2f1SArd BiesheuvelTHUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
1105ad12c2f1SArd Biesheuvel	W(ldr)	pc, .
1106b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_bpiall_pabt
1107b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_bpiall_dabt
1108b9baf5c8SRussell King (Oracle)	W(b)	vector_addrexcptn
1109b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_bpiall_irq
1110b9baf5c8SRussell King (Oracle)	W(b)	vector_bhb_bpiall_fiq
1111b9baf5c8SRussell King (Oracle)#endif
1112b9baf5c8SRussell King (Oracle)
11131da177e4SLinus Torvalds	.data
11141abd3502SRussell King	.align	2
11151da177e4SLinus Torvalds
@ One word of storage exported as cr_alignment.  NOTE(review): presumably
@ holds the saved CP15 control register value written by early setup code
@ (not visible in this file) -- confirm against head.S / setup code.
11161da177e4SLinus Torvalds	.globl	cr_alignment
11171da177e4SLinus Torvaldscr_alignment:
11181da177e4SLinus Torvalds	.space	4
1119