/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
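
/*
 * For orientation only (illustrative, not part of the original source):
 * with CONFIG_MULTI_IRQ_HANDLER the macro above amounts to an indirect
 * call through the handle_arch_irq function pointer (defined at the end
 * of this file, set by the machine code at boot).  A hedged C sketch:
 *
 *	extern void (*handle_arch_irq)(struct pt_regs *);
 *
 *	static void irq_handler_sketch(struct pt_regs *regs)
 *	{
 *		handle_arch_irq(regs);	// r0 = sp (pt_regs), lr = 9997f
 *	}
 */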

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ return if !_TIF_NEED_RESCHED
	b	1b				@ otherwise go again
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
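
/*
 * Illustrative sketch (not part of the original source) of what
 * __und_fault does, in C terms; the correction is 4 for ARM and 2 for
 * Thumb, as computed by the callers:
 *
 *	static void und_fault_sketch(struct pt_regs *regs, unsigned int corr)
 *	{
 *		regs->ARM_pc -= corr;	// point back at the faulting insn
 *		do_undefinstr(regs);
 *	}
 */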

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should
 * be a multiple of 8 too (enforced by the #error check below)
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
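
/*
 * A hedged sketch of how the pairs above are consumed: each ".long insn,
 * fixup" entry lands in the __ex_table section as an
 * exception_table_entry; on a fault at insn 1b/2b/3b the kernel's fixup
 * search redirects the saved PC to label 4, which returns via r9:
 *
 *	struct exception_table_entry {
 *		unsigned long insn, fixup;
 *	};
 */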

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
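
/*
 * A hedged C sketch (not part of the original source) of the dispatch
 * test below: an instruction is treated as a co-processor instruction
 * only if bits 27 and 26 are both set, and bits 11:8 then select the CP
 * number used to index the branch table:
 *
 *	static int coproc_number(unsigned int insn)
 *	{
 *		if ((insn & 0x0c000000) != 0x0c000000)
 *			return -1;		// not a CP instruction
 *		return (insn >> 8) & 0xf;	// CP# from bits 11:8
 *	}
 */
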
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
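
/*
 * The C-level contract described above, as a sketch (the authoritative
 * declaration lives in asm/switch_to.h):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 */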
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
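
/*
 * Illustrative user-side sketch (see kernel_user_helpers.txt for the
 * authoritative examples): user code first checks the helper version
 * word at 0xffff0ffc, then calls helpers at their fixed addresses:
 *
 *	#define __kuser_helper_version	(*(int32_t *)0xffff0ffc)
 *
 *	void check_kuser_version(int required)
 *	{
 *		if (__kuser_helper_version < required)
 *			abort();	// needed helper not provided
 *	}
 */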
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
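
/*
 * User-side calling convention for this helper, sketched from
 * Documentation/arm/kernel_user_helpers.txt:
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns 0 with the C flag set if *ptr was atomically changed from
 * *oldval to *newval; non-zero with C clear otherwise.
 */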

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

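/*
 * User-side sketch for the barrier helper above, per
 * Documentation/arm/kernel_user_helpers.txt:
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 */
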
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

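/*
 * User-side sketch for __kuser_cmpxchg, per
 * Documentation/arm/kernel_user_helpers.txt; same return convention as
 * the 64-bit variant (0 and C set on a successful exchange):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 */
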
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
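
/*
 * User-side sketch for the TLS helper above, per
 * Documentation/arm/kernel_user_helpers.txt:
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */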

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
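
/*
 * A hedged sketch of the table indexing at the end of the macro below:
 * the low 4 bits of the interrupted context's CPSR mode field pick one
 * of the 16 handler words that follow each stub, e.g.:
 *
 *	handler = branch_table[spsr & 0x0f];	// 0 = USR, 3 = SVC, ...
 */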
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif