/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
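
/*
 * In rough C terms, the CONFIG_GENERIC_IRQ_MULTI_HANDLER path above is
 * just an indirect call through the hook that set_handle_irq() installs
 * (an illustrative sketch, not literal kernel code):
 *
 *	extern void (*handle_arch_irq)(struct pt_regs *);
 *
 *	handle_arch_irq(regs);		// r0 == sp == regs on the SVC stack
 */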

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

	.section	.entry.text,"ax",%progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	get_thread_info tsk
	uaccess_entry tsk, r0, r1, r2, \uaccess

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get the new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, so we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
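
/*
 * A C-level sketch of the fix-up above (illustrative only; the real
 * work continues in do_undefinstr()):
 *
 *	regs->ARM_pc -= correction;	// 4 in ARM state, 2 in Thumb state
 *	do_undefinstr(regs);
 */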

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However, in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE
 * must be a multiple of 8 as well (enforced below).
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	ldr	r0, =TASK_SIZE
	cmp	r4, r0
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
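
/*
 * In C terms, the quick inline test above is roughly (sketch only;
 * the kuser helpers live above TASK_SIZE, so an aborted PC below
 * that can never need restarting):
 *
 *	if (regs->ARM_pc >= TASK_SIZE)
 *		kuser_cmpxchg64_fixup();	// out-of-line slow path
 */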

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str     r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
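
/*
 * For example, with the first mask/opcode pair in .LCneon_arm_opcodes
 * below, an instruction matches as NEON whenever
 *
 *	(insn & 0xfe000000) == 0xf2000000
 *
 * i.e. whenever its top byte is 0xf2 or 0xf3.
 */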
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
	.align	2
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
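/*
 * Roughly, the C-level prototype (cf. asm/switch_to.h) is:
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 */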
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
 */
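/*
 * User space is expected to reach these helpers via their fixed
 * addresses, e.g. (a sketch following the .rst above):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	typedef void *(__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 */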
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
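
/*
 * Typical user-space use of __kuser_cmpxchg, following the example in
 * Documentation/arm/kernel_user_helpers.rst (a sketch; the helper
 * returns zero when *ptr was updated):
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *
 *		return new;
 *	}
 */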

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
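
/*
 * The eor in the stub maps the entry mode onto SVC mode in a single
 * instruction; e.g. for the IRQ stub (\mode == IRQ_MODE):
 *
 *	cpsr ^ (IRQ_MODE ^ SVC_MODE) == cpsr with 0x12 replaced by 0x13
 *
 * PSR_ISETSTATE additionally sets the Thumb bit when the kernel itself
 * runs in Thumb (CONFIG_THUMB2_KERNEL), so the movs pc lands in the
 * right instruction set.
 */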

	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing a FIQ to act like an NMI
 * does on x86 systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

	.section .vectors, "ax", %progbits
.L__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, .L__vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data
	.align	2

	.globl	cr_alignment
cr_alignment:
	.space	4