/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values...  Be aware!
 */
#include <linux/config.h>
#include <linux/init.h>

#include <asm/thread_info.h>
#include <asm/glue.h>
#include <asm/ptrace.h>
#include <asm/vfpmacros.h>

#include "entry-header.S"

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, sym, reason
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
	ldr	r4, .LC\sym
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry abt, BAD_PREFETCH
	b	1f

__dabt_invalid:
	inv_entry abt, BAD_DATA
	b	1f

__irq_invalid:
	inv_entry irq, BAD_IRQ
	b	1f

__und_invalid:
	inv_entry und, BAD_UNDEFINSTR

1:	zero_fp
	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
	mov	r0, sp
	and	r2, r6, #31			@ int mode
	b	bad_mode

/*
 * SVC mode handlers
 */
	.macro	svc_entry, sym
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r2, .LC\sym
	add	r0, sp, #S_FRAME_SIZE
	ldmia	r2, {r2 - r4}			@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

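/*
 * The .align 5 directives below place each handler entry point on a
 * 32-byte (2^5) boundary, which on most ARM cores of this vintage is
 * the start of a cache line.
 */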
	.align	5
__dabt_svc:
	svc_entry abt

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
__irq_svc:
	svc_entry irq
#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
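	@
	@ Interrupt dispatch loop: get_irqnr_and_base leaves the flags NE
	@ while an IRQ is pending, and lr is pointed back at 1b so that
	@ asm_do_IRQ returns here to re-poll until nothing is pending.
	@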
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
	teq	r0, r7
	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
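	@ If the count is not the value we wrote on entry, something has
	@ corrupted it; the strne below stores to address 0 (r0 - r0) to
	@ force a fault and flag the bug.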
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r9, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [r8, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ done
	b	1b				@ go again
#endif

	.align	5
__und_svc:
	svc_entry und

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	.align	5
__pabt_svc:
	svc_entry abt

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
.LCirq:
	.word	__temp_irq
.LCund:
	.word	__temp_und
.LCabt:
	.word	__temp_abt
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 */
	.macro	usr_entry, sym
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r7, .LC\sym
	add	r5, sp, #S_PC
	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r5, {r2 - r4}
	stmdb	r5, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r7, r0, __temp_\sym

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.align	5
__dabt_usr:
	usr_entry abt

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort

	.align	5
__irq_usr:
	usr_entry irq

#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
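	@
	@ Same interrupt dispatch loop as __irq_svc above: keep calling
	@ asm_do_IRQ until get_irqnr_and_base reports no pending IRQs.
	@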
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	adrne	lr, 1b
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_PREEMPT]
	teq	r0, r7
	str	r9, [r8, #TI_PREEMPT]
	strne	r0, [r0, -r0]
	mov	tsk, r8
#else
	get_thread_info tsk
#endif
	mov	why, #0
	b	ret_to_user

	.ltorg

	.align	5
__und_usr:
	usr_entry und

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous
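/*
 * The __ex_table entry pairs the user-space load at 1b with the fixup at
 * 2b: if the ldrt faults (e.g. the page has been unmapped), execution
 * resumes at 2b, which returns through r9 (ret_from_exception) rather
 * than oopsing.
 */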

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	enable_irq r7
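	@
	@ Computed branch into the table below: r8 holds the coprocessor
	@ number in bits 8-11, so "r8, lsr #6" is CP# * 4.  In the add, pc
	@ reads as that instruction's address + 8, i.e. the CP#0 entry, and
	@ the "mov r0, r0" is only a no-op filler between the add and the
	@ table.
	@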
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */
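/*
 * fp_enter below initially points at fpundefinstr, so FP instructions are
 * treated as undefined; a floating point emulator (such as nwfpe) is
 * expected to overwrite it with its own entry point when it initialises.
 */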

	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry abt

	enable_irq r0				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia   ip, {r4, r5}
#endif
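	@ The TLS value lives in a fixed word in the vector page:
	@ 0xffff0fff - 3 = 0xffff0ffc, updated here on every switch.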
	mov	r4, #0xffff0fff
	str	r3, [r4, #-3]			@ Set TLS ptr
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
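	@ r2 still points at TI_CPU_DOMAIN from the write-back above, so this
	@ ldmib loads starting at the following word, i.e. the next thread's
	@ saved cpu_context.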
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT
/*
 * Vector stubs.
 *
 * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	.macro	vector_stub, name, sym, correction=0
	.align	5

vector_\name:
	ldr	r13, .LCs\sym
	.if \correction
	sub	lr, lr, #\correction
	.endif
	str	lr, [r13]			@ save lr_IRQ
	mrs	lr, spsr
	str	lr, [r13, #4]			@ save spsr_IRQ
	@
	@ now branch to the relevant MODE handling routine
	@
	mrs	r13, cpsr
	bic	r13, r13, #MODE_MASK
	orr	r13, r13, #MODE_SVC
	msr	spsr_cxsf, r13			@ switch to SVC_32 mode

	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr				@ Changes mode and branches
	.endm
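/*
 * Each vector_stub invocation below is immediately followed by a table of
 * 16 handler addresses, indexed by the low four bits (the mode) of the
 * interrupted CPSR; the "ldr lr, [pc, lr, lsl #2]" above picks the entry,
 * and "movs pc, lr" branches to it while switching into SVC mode.
 */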

__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, irq, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, abt, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, abt, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, und

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

.LCsirq:
	.word	__temp_irq
.LCsund:
	.word	__temp_und
.LCsabt:
	.word	__temp_abt

__stubs_end:

	.equ	__real_stubs_start, .LCvectors + 0x200

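/*
 * The vectors below are assembled as if the stubs sat 0x200 bytes above
 * them (__real_stubs_start).  Since branches are PC-relative, the same
 * offsets remain correct once __trap_init copies this table to the high
 * vector page and the stubs to 0x200 beyond it.
 */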
.LCvectors:
	swi	SYS_ERROR0
	b	__real_stubs_start + (vector_und - __stubs_start)
	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
	b	__real_stubs_start + (vector_pabt - __stubs_start)
	b	__real_stubs_start + (vector_dabt - __stubs_start)
	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
	b	__real_stubs_start + (vector_irq - __stubs_start)
	b	__real_stubs_start + (vector_fiq - __stubs_start)

ENTRY(__trap_init)
	stmfd	sp!, {r4 - r6, lr}

	mov	r0, #0xff000000
	orr	r0, r0, #0x00ff0000		@ high vectors position
	adr	r1, .LCvectors			@ set up the vectors
	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}

	add	r2, r0, #0x200
	adr	r0, __stubs_start		@ copy stubs to 0x200
	adr	r1, __stubs_end
1:	ldr	r3, [r0], #4
	str	r3, [r2], #4
	cmp	r0, r1
	blt	1b
	LOADREGS(fd, sp!, {r4 - r6, pc})

	.data

/*
 * Do not reorder these, and do not insert extra data between...
 */
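/*
 * Each __temp_* block below is the three word save area (lr_<mode>,
 * spsr_<mode>, old_r0) that the vector stubs write and the entry macros
 * read via the .LC* pointers above, so the layout must stay exactly as is.
 */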

__temp_irq:
	.word	0				@ saved lr_irq
	.word	0				@ saved spsr_irq
	.word	-1				@ old_r0
__temp_und:
	.word	0				@ Saved lr_und
	.word	0				@ Saved spsr_und
	.word	-1				@ old_r0
__temp_abt:
	.word	0				@ Saved lr_abt
	.word	0				@ Saved spsr_abt
	.word	-1				@ old_r0

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
