xref: /openbmc/linux/arch/microblaze/kernel/entry.S (revision e8e0929d)
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002	NEC Corporation
8 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34/* The size of a state save frame. */
35#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)
36
37/* The offset of the struct pt_regs in a `state save frame' on the stack. */
38#define PTO	STATE_SAVE_ARG_SPACE /* 24 - the space reserved for args */
39
/* Emit a global, 4-byte-aligned entry-point label. */
40#define C_ENTRY(name)	.globl name; .align 4; name
41
42/*
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using a microblaze version that
45 * allows msr ops to write to BIP.
 * Note: every macro below clobbers r11.
46 */
47#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
48	.macro	clear_bip	/* clear MSR[BIP] (break in progress) */
49	msrclr	r11, MSR_BIP
50	nop
51	.endm
52
53	.macro	set_bip		/* set MSR[BIP] - masks further breaks */
54	msrset	r11, MSR_BIP
55	nop
56	.endm
57
58	.macro	clear_eip	/* clear MSR[EIP] (exception in progress) */
59	msrclr	r11, MSR_EIP
60	nop
61	.endm
62
63	.macro	set_ee		/* set MSR[EE] (exception enable) */
64	msrset	r11, MSR_EE
65	nop
66	.endm
67
68	.macro	disable_irq	/* clear MSR[IE] */
69	msrclr	r11, MSR_IE
70	nop
71	.endm
72
73	.macro	enable_irq	/* set MSR[IE] */
74	msrset	r11, MSR_IE
75	nop
76	.endm
77
78	.macro	set_ums		/* UMS=1, VMS=0 */
79	msrset	r11, MSR_UMS
80	nop
81	msrclr	r11, MSR_VMS
82	nop
83	.endm
84
85	.macro	set_vms		/* UMS=0, VMS=1 */
86	msrclr	r11, MSR_UMS
87	nop
88	msrset	r11, MSR_VMS
89	nop
90	.endm
91
92	.macro	clear_vms_ums	/* UMS=0, VMS=0 */
93	msrclr	r11, MSR_VMS
94	nop
95	msrclr	r11, MSR_UMS
96	nop
97	.endm
98#else
99	.macro	clear_bip
100	mfs	r11, rmsr
101	nop
102	andi	r11, r11, ~MSR_BIP
103	mts	rmsr, r11
104	nop
105	.endm
106
107	.macro	set_bip
108	mfs	r11, rmsr
109	nop
110	ori	r11, r11, MSR_BIP
111	mts	rmsr, r11
112	nop
113	.endm
114
115	.macro	clear_eip
116	mfs	r11, rmsr
117	nop
118	andi	r11, r11, ~MSR_EIP
119	mts	rmsr, r11
120	nop
121	.endm
122
123	.macro	set_ee
124	mfs	r11, rmsr
125	nop
126	ori	r11, r11, MSR_EE
127	mts	rmsr, r11
128	nop
129	.endm
130
131	.macro	disable_irq
132	mfs	r11, rmsr
133	nop
134	andi	r11, r11, ~MSR_IE
135	mts	rmsr, r11
136	nop
137	.endm
138
139	.macro	enable_irq
140	mfs	r11, rmsr
141	nop
142	ori	r11, r11, MSR_IE
143	mts	rmsr, r11
144	nop
145	.endm
146
	/* NOTE(review): this variant sets VMS and clears UMS (identical to
	 * set_vms below), while the msrset/msrclr variant above sets UMS and
	 * clears VMS - confirm which behavior is intended. */
147	.macro set_ums
148	mfs	r11, rmsr
149	nop
150	ori	r11, r11, MSR_VMS
151	andni	r11, r11, MSR_UMS
152	mts	rmsr, r11
153	nop
154	.endm
155
156	.macro	set_vms		/* VMS=1, UMS=0 */
157	mfs	r11, rmsr
158	nop
159	ori	r11, r11, MSR_VMS
160	andni	r11, r11, MSR_UMS
161	mts	rmsr, r11
162	nop
163	.endm
164
165	.macro	clear_vms_ums	/* VMS=0, UMS=0 */
166	mfs	r11, rmsr
167	nop
168	andni	r11, r11, (MSR_VMS|MSR_UMS)
169	mts	rmsr,r11
170	nop
171	.endm
172#endif
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
179/* turn on virtual protected mode save (rted falls through to local label 2) */
180#define VM_ON		\
181	set_ums;		\
182	rted	r0, 2f;	\
1832: nop;
184
185/* turn off virtual protected mode save and user mode save;
 * continues at the physical address of local label 1 */
186#define VM_OFF			\
187	clear_vms_ums;			\
188	rted	r0, TOPHYS(1f);	\
1891: nop;
190
/* Store r2, r5-r31 and MSR into the pt_regs frame at r1+PTO
 * (r14, the trap return address, lands in PT_PC). Clobbers r11. */
191#define SAVE_REGS \
192	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
193	swi	r5, r1, PTO+PT_R5;					\
194	swi	r6, r1, PTO+PT_R6;					\
195	swi	r7, r1, PTO+PT_R7;					\
196	swi	r8, r1, PTO+PT_R8;					\
197	swi	r9, r1, PTO+PT_R9;					\
198	swi	r10, r1, PTO+PT_R10;					\
199	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
200	swi	r12, r1, PTO+PT_R12;					\
201	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
202	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
203	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
204	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
205	swi	r19, r1, PTO+PT_R19;					\
206	swi	r20, r1, PTO+PT_R20;					\
207	swi	r21, r1, PTO+PT_R21;					\
208	swi	r22, r1, PTO+PT_R22;					\
209	swi	r23, r1, PTO+PT_R23;					\
210	swi	r24, r1, PTO+PT_R24;					\
211	swi	r25, r1, PTO+PT_R25;					\
212	swi	r26, r1, PTO+PT_R26;					\
213	swi	r27, r1, PTO+PT_R27;					\
214	swi	r28, r1, PTO+PT_R28;					\
215	swi	r29, r1, PTO+PT_R29;					\
216	swi	r30, r1, PTO+PT_R30;					\
217	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
218	mfs	r11, rmsr;		/* save MSR */			\
219	nop;								\
220	swi	r11, r1, PTO+PT_MSR;
221
/* Reload MSR and r2, r5-r31 from the pt_regs frame at r1+PTO
 * (PT_PC reloads into r14). Clobbers r11 before reloading it. */
222#define RESTORE_REGS \
223	lwi	r11, r1, PTO+PT_MSR;					\
224	mts	rmsr , r11;						\
225	nop;								\
226	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
227	lwi	r5, r1, PTO+PT_R5;					\
228	lwi	r6, r1, PTO+PT_R6;					\
229	lwi	r7, r1, PTO+PT_R7;					\
230	lwi	r8, r1, PTO+PT_R8;					\
231	lwi	r9, r1, PTO+PT_R9;					\
232	lwi	r10, r1, PTO+PT_R10;					\
233	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
234	lwi	r12, r1, PTO+PT_R12;					\
235	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
236	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
237	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
238	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
239	lwi	r19, r1, PTO+PT_R19;					\
240	lwi	r20, r1, PTO+PT_R20;					\
241	lwi	r21, r1, PTO+PT_R21;					\
242	lwi	r22, r1, PTO+PT_R22;					\
243	lwi	r23, r1, PTO+PT_R23;					\
244	lwi	r24, r1, PTO+PT_R24;					\
245	lwi	r25, r1, PTO+PT_R25;					\
246	lwi	r26, r1, PTO+PT_R26;					\
247	lwi	r27, r1, PTO+PT_R27;					\
248	lwi	r28, r1, PTO+PT_R28;					\
249	lwi	r29, r1, PTO+PT_R29;					\
250	lwi	r30, r1, PTO+PT_R30;					\
251	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
252
253.text
254
255/*
256 * User trap.
257 *
258 * System calls are handled here.
259 *
260 * Syscall protocol:
261 * Syscall number in r12, args in r5-r10
262 * Return value in r3
263 *
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
266 */
267C_ENTRY(_user_exception):
268	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
269	addi	r14, r14, 4	/* return address is 4 bytes after the call */
270	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */
271
272	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
273	beqi	r11, 1f;		/* Jump ahead if coming from user */
274/* Kernel-mode state save. */
275	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
276	tophys(r1,r11);
277	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
278	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
279
280	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
281	SAVE_REGS
282
283	addi	r11, r0, 1; 		/* Was in kernel-mode. */
284	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
285	brid	2f;
286	nop;				/* Fill delay slot */
287
288/* User-mode state save.  */
2891:
290	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
291	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
292	tophys(r1,r1);
293	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
294/* calculate kernel stack pointer from task struct 8k */
295	addik	r1, r1, THREAD_SIZE;
296	tophys(r1,r1);
297
298	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
299	SAVE_REGS
300
301	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
302	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
303	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
304	addi	r11, r0, 1;
305	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
3062:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
307	/* Save away the syscall number.  */
308	swi	r12, r1, PTO+PT_R0;
309	tovirt(r1,r1)
310
311/* where the trap should return need -8 to adjust for rtsd r15, 8*/
312/* Jump to the appropriate function for the system call number in r12
313 * (r12 is not preserved), or return an error if r12 is not valid. The LP
314 * register should point to the location where
315 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
316
317	# Step into virtual mode.
318	set_vms;
319	addik	r11, r0, 3f
320	rtid	r11, 0
321	nop
3223:
323	add	r11, r0, CURRENT_TASK	 /* Get current task ptr into r11 */
324	lwi	r11, r11, TS_THREAD_INFO /* get thread info */
325	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
326	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
327	beqi	r11, 4f			 /* no syscall-trace work -> dispatch */
328
329	addik	r3, r0, -ENOSYS
330	swi	r3, r1, PTO + PT_R3	 /* default retval if tracer aborts */
331	brlid	r15, do_syscall_trace_enter
332	addik	r5, r1, PTO + PT_R0
333
334	# do_syscall_trace_enter returns the new syscall nr.
335	addk	r12, r0, r3
336	lwi	r5, r1, PTO+PT_R5;	 /* reload args the tracer may have changed */
337	lwi	r6, r1, PTO+PT_R6;
338	lwi	r7, r1, PTO+PT_R7;
339	lwi	r8, r1, PTO+PT_R8;
340	lwi	r9, r1, PTO+PT_R9;
341	lwi	r10, r1, PTO+PT_R10;
3424:
343/* Jump to the appropriate function for the system call number in r12
344 * (r12 is not preserved), or return an error if r12 is not valid.
345 * The LP register should point to the location where the called function
346 * should return.  [note that MAKE_SYS_CALL uses label 1] */
347	/* See if the system call number is valid */
348	addi	r11, r12, -__NR_syscalls;
349	bgei	r11,5f;
350	/* Figure out which function to use for this system call.  */
351	/* Note Microblaze barrel shift is optional, so don't rely on it */
352	add	r12, r12, r12;			/* convert num -> ptr */
353	add	r12, r12, r12;
354
355	/* Track per-syscall counts in the r0_ram scratch area */
356	lwi	r3, r12, 0x400 + r0_ram
357	addi	r3, r3, 1
358	swi	r3, r12, 0x400 + r0_ram
359
360	# Find and jump into the syscall handler.
361	lwi	r12, r12, sys_call_table
362	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
363	la	r15, r0, ret_from_trap-8
364	bra	r12
365
366	/* The syscall number is invalid, return an error.  */
3675:
368	addi	r3, r0, -ENOSYS;
369	rtsd	r15,8;		/* looks like a normal subroutine return */
370	or 	r0, r0, r0
371
372
373/* Entry point used to return from a syscall/trap */
374/* We re-enable BIP bit before state restore */
375C_ENTRY(ret_from_trap):
376	set_bip;			/*  Ints masked for state restore*/
377	lwi	r11, r1, PTO+PT_MODE;
378/* See if returning to kernel mode, if so, skip resched &c.  */
379	bnei	r11, 2f;
380
381	/* We're returning to user mode, so check for various conditions that
382	 * trigger rescheduling. */
383	# FIXME: Restructure all these flag checks.
384	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
385	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
386	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
387	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
388	beqi	r11, 1f				/* no trace work -> skip */
389
390	swi	r3, r1, PTO + PT_R3	/* save syscall result */
391	swi	r4, r1, PTO + PT_R4
392	brlid	r15, do_syscall_trace_leave
393	addik	r5, r1, PTO + PT_R0
394	lwi	r3, r1, PTO + PT_R3	/* restore syscall result */
395	lwi	r4, r1, PTO + PT_R4
3961:
397
398	/* We're returning to user mode, so check for various conditions that
399	 * trigger rescheduling. */
400	/* Get current task ptr into r11 */
401	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
402	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
403	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
404	andi	r11, r11, _TIF_NEED_RESCHED;
405	beqi	r11, 5f;
406
407	swi	r3, r1, PTO + PT_R3; /* store syscall result */
408	swi	r4, r1, PTO + PT_R4;
409	bralid	r15, schedule;	/* Call scheduler */
410	nop;				/* delay slot */
411	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
412	lwi	r4, r1, PTO + PT_R4;
413
414	/* Maybe handle a signal */
4155:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
416	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
417	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
418	andi	r11, r11, _TIF_SIGPENDING;
419	beqi	r11, 1f;		/* No signals pending - skip signal handling */
420
421	swi	r3, r1, PTO + PT_R3; /* store syscall result */
422	swi	r4, r1, PTO + PT_R4;
423	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
424	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
425	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
426	bralid	r15, do_signal;	/* Handle any signals */
427	nop;
428	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
429	lwi	r4, r1, PTO + PT_R4;
430
431/* Finally, return to user state.  */
4321:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
433	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
434	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
435	VM_OFF;
436	tophys(r1,r1);
437	RESTORE_REGS;
438	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
439	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
440	bri	6f;
441
442/* Return to kernel state.  */
4432:	VM_OFF;
444	tophys(r1,r1);
445	RESTORE_REGS;
446	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
447	tovirt(r1,r1);
4486:
449TRAP_return:		/* Make global symbol for debugging */
450	rtbd	r14, 0;	/* Instructions to return from an IRQ */
451	nop;
452
453
454/* These syscalls need access to the struct pt_regs on the stack, so we
455   implement them in assembly (they're basically all wrappers anyway).  */
456
/* sys_fork: tail-call do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0). */
457C_ENTRY(sys_fork_wrapper):
458	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
459	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
460	la	r7, r1, PTO			/* Arg 2: parent context */
461	add	r8, r0, r0			/* Arg 3: (unused) */
462	add	r9, r0, r0;			/* Arg 4: (unused) */
463	add	r10, r0, r0;			/* Arg 5: (unused) */
464	brid	do_fork		/* Do real work (tail-call) */
465	nop;
466
467/* This is the initial entry point for a new child thread, with an appropriate
468   stack in place that makes it look as though the child is in the middle of a
469   syscall.  This function is actually `returned to' from switch_thread
470   (copy_thread makes ret_from_fork the return address in each new thread's
471   saved context).  */
472C_ENTRY(ret_from_fork):
473	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
474	add	r3, r5, r0;	/* switch_thread returns the prev task */
475				/* ( in the delay slot ) */
476	add	r3, r0, r0;	/* Child's fork call should return 0. */
477	brid	ret_from_trap;	/* Do normal trap return */
478	nop;
479
480C_ENTRY(sys_vfork):
481	brid	microblaze_vfork	/* Do real work (tail-call) */
482	la	r5, r1, PTO		/* pt_regs ptr passed in delay slot */
483
484C_ENTRY(sys_clone):
485	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
486	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
4871:	la	r7, r1, PTO;			/* Arg 2: parent context */
488	add	r8, r0, r0;			/* Arg 3: (unused) */
489	add	r9, r0, r0;			/* Arg 4: (unused) */
490	add	r10, r0, r0;			/* Arg 5: (unused) */
491	brid	do_fork		/* Do real work (tail-call) */
492	nop;
493
494C_ENTRY(sys_execve):
495	la	r8, r1, PTO;		/* add user context as 4th arg */
496	brid	microblaze_execve;	/* Do real work (tail-call).*/
497	nop;
498
499C_ENTRY(sys_rt_sigsuspend_wrapper):
500	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
501	swi	r4, r1, PTO+PT_R4;
502	la	r7, r1, PTO;		/* add user context as 3rd arg */
503	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
504	nop;
505	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
506	lwi	r4, r1, PTO+PT_R4;
507	bri ret_from_trap /* fall through will not work here due to align */
508	nop;
509
510C_ENTRY(sys_rt_sigreturn_wrapper):
511	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
512	swi	r4, r1, PTO+PT_R4;
513	la	r5, r1, PTO;		/* add user context as 1st arg */
514	brlid	r15, sys_rt_sigreturn	/* Do real work */
515	nop;
516	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
517	lwi	r4, r1, PTO+PT_R4;
518	bri ret_from_trap /* fall through will not work here due to align */
519	nop;
520
521/*
522 * HW EXCEPTION routine start
523 */
524
/* Common exception-entry state save: pick the kernel stack (new frame if
 * coming from user mode), store r3/r4 and all regs, and record PT_MODE. */
525#define SAVE_STATE	\
526	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
527	set_bip;	/*equalize initial state for all possible entries*/\
528	clear_eip;							\
529	enable_irq;							\
530	set_ee;								\
531	/* See if already in kernel mode.*/				\
532	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
533	beqi	r11, 1f;		/* Jump ahead if coming from user */\
534	/* Kernel-mode state save.  */					\
535	/* Reload kernel stack-ptr. */					\
536	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
537	tophys(r1,r11);							\
538	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
539	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
540	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
541	/* store return registers separately because			\
542	 * this macro is used for other exceptions */			\
543	swi	r3, r1, PTO + PT_R3;					\
544	swi	r4, r1, PTO + PT_R4;					\
545	SAVE_REGS							\
546	/* PC, before IRQ/trap - this is one instruction above */	\
547	swi	r17, r1, PTO+PT_PC;					\
548									\
549	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
550	swi	r11, r1, PTO+PT_MODE; 	 				\
551	brid	2f;							\
552	nop;				/* Fill delay slot */		\
5531:	/* User-mode state save.  */					\
554	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
555	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
556	tophys(r1,r1);							\
557	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
558	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
559	tophys(r1,r1);							\
560									\
561	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
562	/* store return registers separately because this macro		\
563	 * is used for other exceptions */				\
564	swi	r3, r1, PTO + PT_R3; 					\
565	swi	r4, r1, PTO + PT_R4;					\
566	SAVE_REGS							\
567	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
568	swi	r17, r1, PTO+PT_PC;					\
569									\
570	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
571	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
572	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
573	addi	r11, r0, 1;						\
574	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5752:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
576	/* Save away the syscall number.  */				\
577	swi	r0, r1, PTO+PT_R0;					\
578	tovirt(r1,r1)
579
/* Generic HW exception entry: save state, then call full_exception(regs, esr, fsr). */
580C_ENTRY(full_exception_trap):
581	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
582	/* adjust exception address for privileged instruction
583	 * for finding where it is */
584	addik	r17, r17, -4
585	SAVE_STATE /* Save registers */
586	/* FIXME this can be stored directly in PT_ESR reg.
587	 * I tested it but there is a fault */
588	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
589	la	r15, r0, ret_from_exc - 8
590	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
591	mfs	r6, resr
592	nop
593	mfs	r7, rfsr;		/* save FSR */
594	nop
595	la	r12, r0, full_exception
596	set_vms;
597	rtbd	r12, 0;
598	nop;
599
600/*
601 * Unaligned data trap.
602 *
603 * Unaligned data trap last on 4k page is handled here.
604 *
605 * Trap entered via exception, so EE bit is set, and interrupts
606 * are masked.  This is nice, means we don't have to CLI before state save
607 *
608 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
609 */
610C_ENTRY(unaligned_data_trap):
611	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
612	SAVE_STATE		/* Save registers.*/
613	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
614	la	r15, r0, ret_from_exc-8
615	mfs	r3, resr		/* ESR */
616	nop
617	mfs	r4, rear		/* EAR */
618	nop
619	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
620	la	r12, r0, _unaligned_data_exception
621	set_vms;
622	rtbd	r12, 0;	/* interrupts enabled */
623	nop;
624
625/*
626 * Page fault traps.
627 *
628 * If the real exception handler (from hw_exception_handler.S) didn't find
629 * the mapping for the process, then we're thrown here to handle such situation.
630 *
631 * Trap entered via exceptions, so EE bit is set, and interrupts
632 * are masked.  This is nice, means we don't have to CLI before state save
633 *
634 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
635 * will bail out to this point if they can't resolve the lightweight TLB fault.
636 *
637 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
638 * void do_page_fault(struct pt_regs *regs,
639 *				unsigned long address,
640 *				unsigned long error_code)
641 */
642/* data and instruction trap - which one it was is resolved in fault.c */
643C_ENTRY(page_fault_data_trap):
644	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
645	SAVE_STATE		/* Save registers.*/
646	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
647	la	r15, r0, ret_from_exc-8
648	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
649	mfs	r6, rear		/* parameter unsigned long address */
650	nop
651	mfs	r7, resr		/* parameter unsigned long error_code */
652	nop
653	la	r12, r0, do_page_fault
654	set_vms;
655	rtbd	r12, 0;	/* interrupts enabled */
656	nop;
657
/* Instruction-side page fault: like the data trap above, but error_code is 0. */
658C_ENTRY(page_fault_instr_trap):
659	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
660	SAVE_STATE		/* Save registers.*/
661	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
662	la	r15, r0, ret_from_exc-8
663	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
664	mfs	r6, rear		/* parameter unsigned long address */
665	nop
666	ori	r7, r0, 0		/* parameter unsigned long error_code */
667	la	r12, r0, do_page_fault
668	set_vms;
669	rtbd	r12, 0;	/* interrupts enabled */
670	nop;
671
672/* Entry point used to return from an exception.  */
673C_ENTRY(ret_from_exc):
674	set_bip;			/*  Ints masked for state restore*/
675	lwi	r11, r1, PTO+PT_MODE;
676	bnei	r11, 2f;		/* See if returning to kernel mode, */
677					/* ... if so, skip resched &c.  */
678
679	/* We're returning to user mode, so check for various conditions that
680	   trigger rescheduling. */
681	/* Get current task ptr into r11 */
682	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
683	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
684	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
685	andi	r11, r11, _TIF_NEED_RESCHED;
686	beqi	r11, 5f;
687
688/* Call the scheduler before returning from a syscall/trap. */
689	bralid	r15, schedule;	/* Call scheduler */
690	nop;				/* delay slot */
691
692	/* Maybe handle a signal */
6935:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
694	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
695	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
696	andi	r11, r11, _TIF_SIGPENDING;
697	beqi	r11, 1f;		/* No signals pending - skip signal handling */
698
699	/*
700	 * Handle a signal return; Pending signals should be in r18.
701	 *
702	 * Not all registers are saved by the normal trap/interrupt entry
703	 * points (for instance, call-saved registers (because the normal
704	 * C-compiler calling sequence in the kernel makes sure they're
705	 * preserved), and call-clobbered registers in the case of
706	 * traps), but signal handlers may want to examine or change the
707	 * complete register state.  Here we save anything not saved by
708	 * the normal entry sequence, so that it may be safely restored
709	 * (in a possibly modified form) after do_signal returns.
710	 * store return registers separately because this macro is used
711	 * for other exceptions */
712	swi	r3, r1, PTO + PT_R3;
713	swi	r4, r1, PTO + PT_R4;
714	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
715	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
716	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
717	bralid	r15, do_signal;	/* Handle any signals */
718	nop;
719	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
720	lwi	r4, r1, PTO+PT_R4;
721
722/* Finally, return to user state.  */
7231:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
724	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
725	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
726	VM_OFF;
727	tophys(r1,r1);
728
729	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
730	lwi	r4, r1, PTO+PT_R4;
731	RESTORE_REGS;
732	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
733
734	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
735	bri	6f;
736/* Return to kernel state.  */
7372:	VM_OFF;
738	tophys(r1,r1);
739	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
740	lwi	r4, r1, PTO+PT_R4;
741	RESTORE_REGS;
742	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
743
744	tovirt(r1,r1);
7456:
746EXC_return:		/* Make global symbol for debugging */
747	rtbd	r14, 0;	/* Instructions to return from an IRQ */
748	nop;
749
750/*
751 * HW EXCEPTION routine end
752 */
753
754/*
755 * Hardware maskable interrupts.
756 *
757 * The stack-pointer (r1) should have already been saved to the memory
758 * location PER_CPU(ENTRY_SP).
759 */
760C_ENTRY(_interrupt):
761/* MS: we are in physical address */
762/* Save registers, switch to proper stack, convert SP to virtual.*/
763	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
764	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
765	/* MS: See if already in kernel mode. */
766	lwi	r11, r0, TOPHYS(PER_CPU(KM));
767	beqi	r11, 1f; /* MS: Jump ahead if coming from user */
768
769/* Kernel-mode state save. */
770	or	r11, r1, r0
771	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
772/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
773	swi	r11, r1, (PT_R1 - PT_SIZE);
774/* MS: restore r11 because of saving in SAVE_REGS */
775	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
776	/* save registers */
777/* MS: Make room on the stack -> activation record */
778	addik	r1, r1, -STATE_SAVE_SIZE;
779/* MS: store return registers separately because
780 * this macro is used for other exceptions */
781	swi	r3, r1, PTO + PT_R3;
782	swi	r4, r1, PTO + PT_R4;
783	SAVE_REGS
784	/* MS: store mode */
785	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
786	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
787	brid	2f;
788	nop; /* MS: Fill delay slot */
789
7901:
791/* User-mode state save. */
792/* MS: restore r11 -> FIXME move before SAVE_REG */
793	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
794 /* MS: get the saved current */
795	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
796	tophys(r1,r1);
797	lwi	r1, r1, TS_THREAD_INFO;
798	addik	r1, r1, THREAD_SIZE;	/* top of the kernel stack */
799	tophys(r1,r1);
800	/* save registers */
801	addik	r1, r1, -STATE_SAVE_SIZE;
802	swi	r3, r1, PTO+PT_R3;
803	swi	r4, r1, PTO+PT_R4;
804	SAVE_REGS
805	/* calculate mode */
806	swi	r0, r1, PTO + PT_MODE;	/* 0 = was in user mode */
807	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
808	swi	r11, r1, PTO+PT_R1;	/* store user SP */
809	/* setup kernel mode to KM */
810	addi	r11, r0, 1;
811	swi	r11, r0, TOPHYS(PER_CPU(KM));
812
8132:
814	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
815	swi	r0, r1, PTO + PT_R0;
816	tovirt(r1,r1)
817	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
818	set_vms;
819	la	r11, r0, do_IRQ;
820	la	r15, r0, irq_call;	/* return address for do_IRQ */
821irq_call:rtbd	r11, 0;
822	nop;
823
824/* MS: we are in virtual mode */
825ret_from_irq:
826	lwi	r11, r1, PTO + PT_MODE;
827	bnei	r11, 2f;		/* returning to kernel mode -> skip work */
828
829	add	r11, r0, CURRENT_TASK;
830	lwi	r11, r11, TS_THREAD_INFO;
831	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
832	andi	r11, r11, _TIF_NEED_RESCHED;
833	beqi	r11, 5f
834	bralid	r15, schedule;
835	nop; /* delay slot */
836
837    /* Maybe handle a signal */
8385:	add	r11, r0, CURRENT_TASK;
839	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
840	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
841	andi	r11, r11, _TIF_SIGPENDING;
842	beqid	r11, no_intr_resched	/* no signals pending -> restore state */
843/* Handle a signal return; Pending signals should be in r18. */
844	addi	r7, r0, 0; /* Arg 3: int in_syscall */
845	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
846	bralid	r15, do_signal;	/* Handle any signals */
847	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */
848
849/* Finally, return to user state. */
850no_intr_resched:
851    /* Disable interrupts, we are now committed to the state restore */
852	disable_irq
853	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
854	add	r11, r0, CURRENT_TASK;
855	swi	r11, r0, PER_CPU(CURRENT_SAVE);
856	VM_OFF;
857	tophys(r1,r1);
858	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
859	lwi	r4, r1, PTO + PT_R4;
860	RESTORE_REGS
861	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
862	lwi	r1, r1, PT_R1 - PT_SIZE;	/* restore user SP */
863	bri	6f;
864/* MS: Return to kernel state. */
8652:	VM_OFF /* MS: turn off MMU */
866	tophys(r1,r1)
867	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
868	lwi	r4, r1, PTO + PT_R4;
869	RESTORE_REGS
870	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
871	tovirt(r1,r1);
8726:
873IRQ_return: /* MS: Make global symbol for debugging */
874	rtid	r14, 0
875	nop
876
877/*
878 * `Debug' trap
879 *  We enter dbtrap in "BIP" (breakpoint) mode.
880 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
881 *  original dbtrap.
882 *  however, wait to save state first
883 */
884C_ENTRY(_debug_exception):
885	/* BIP bit is set on entry, no interrupts can occur */
886	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
887
888	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
889	set_bip;	/*equalize initial state for all possible entries*/
890	clear_eip;
891	enable_irq;
892	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
893	beqi	r11, 1f;		/* Jump ahead if coming from user */
894	/* Kernel-mode state save.  */
895	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
896	tophys(r1,r11);
897	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
898	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
899
900	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
901	swi	r3, r1, PTO + PT_R3;
902	swi	r4, r1, PTO + PT_R4;
903	SAVE_REGS;
904
905	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
906	swi	r11, r1, PTO + PT_MODE;
907	brid	2f;
908	nop;				/* Fill delay slot */
9091:      /* User-mode state save.  */
910	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
911	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
912	tophys(r1,r1);
913	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
914	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
915	tophys(r1,r1);
916
917	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
918	swi	r3, r1, PTO + PT_R3;
919	swi	r4, r1, PTO + PT_R4;
920	SAVE_REGS;
921
922	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
923	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
924	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
925	addi	r11, r0, 1;
926	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
9272:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
928	/* Save away the syscall number.  */
929	swi	r0, r1, PTO+PT_R0;
930	tovirt(r1,r1)
931
932	addi	r5, r0, SIGTRAP		     /* send the trap signal */
933	add	r6, r0, CURRENT_TASK; /* Arg 2: current task */
934	addk	r7, r0, r0		     /* 3rd param zero */
935
936	set_vms;
937	la	r11, r0, send_sig;
938	la	r15, r0, dbtrap_call;	/* return address for send_sig */
939dbtrap_call:	rtbd	r11, 0;
940	nop;
941
942	set_bip;			/*  Ints masked for state restore*/
943	lwi	r11, r1, PTO+PT_MODE;
944	bnei	r11, 2f;		/* returning to kernel mode -> skip work */
945
946	/* Get current task ptr into r11 */
947	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
948	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
949	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
950	andi	r11, r11, _TIF_NEED_RESCHED;
951	beqi	r11, 5f;
952
953/* Call the scheduler before returning from a syscall/trap. */
954
955	bralid	r15, schedule;	/* Call scheduler */
956	nop;				/* delay slot */
957	/* XXX Is PT_DTRACE handling needed here? */
958	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */
959
960	/* Maybe handle a signal */
9615:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
962	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
963	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
964	andi	r11, r11, _TIF_SIGPENDING;
965	beqi	r11, 1f;		/* No signals pending - skip signal handling */
966
967/* Handle a signal return; Pending signals should be in r18.  */
968	/* Not all registers are saved by the normal trap/interrupt entry
969	   points (for instance, call-saved registers (because the normal
970	   C-compiler calling sequence in the kernel makes sure they're
971	   preserved), and call-clobbered registers in the case of
972	   traps), but signal handlers may want to examine or change the
973	   complete register state.  Here we save anything not saved by
974	   the normal entry sequence, so that it may be safely restored
975	   (in a possibly modified form) after do_signal returns.  */
976
977	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
978	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
979	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
980	bralid	r15, do_signal;	/* Handle any signals */
981	nop;
982
983
984/* Finally, return to user state.  */
9851:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
986	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
987	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
988	VM_OFF;
989	tophys(r1,r1);
990
991	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
992	lwi	r4, r1, PTO+PT_R4;
993	RESTORE_REGS
994	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
995
996
997	lwi	r1, r1, PT_R1 - PT_SIZE;
998					/* Restore user stack pointer. */
999	bri	6f;
1000
1001/* Return to kernel state.  */
10022:	VM_OFF;
1003	tophys(r1,r1);
1004	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
1005	lwi	r4, r1, PTO+PT_R4;
1006	RESTORE_REGS
1007	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
1008
1009	tovirt(r1,r1);
10106:
1011DBTRAP_return:		/* Make global symbol for debugging */
1012	rtbd	r14, 0;	/* Instructions to return from an IRQ */
1013	nop;
1014
1015
1016
/*
 * _switch_to - switch register context from one task to another.
 *
 * In:  r5 = thread_info of the outgoing (previous) task
 *      r6 = thread_info of the incoming (next) task
 * Out: r3 = previous contents of r31 (the outgoing "current" task
 *           pointer), handed back to the C caller as the return value
 *
 * Only callee-preserved state is saved/restored through cpu_context:
 * r1/r2, the dedicated registers r13-r18, the non-volatile registers
 * r19-r30, and the special registers. Volatile registers were already
 * saved on the stack by the code that called _switch_to().
 */
1017ENTRY(_switch_to)
1018	/* prepare return value */
1019	addk	r3, r0, r31
1020
1021	/* save registers in cpu_context */
1022	/* use r11 and r12, volatile registers, as temp register */
1023	/* give start of cpu_context for previous process */
1024	addik	r11, r5, TI_CPU_CONTEXT
1025	swi	r1, r11, CC_R1
1026	swi	r2, r11, CC_R2
1027	/* skip volatile registers.
1028	 * they are saved on stack when we jumped to _switch_to() */
1029	/* dedicated registers */
1030	swi	r13, r11, CC_R13
1031	swi	r14, r11, CC_R14
1032	swi	r15, r11, CC_R15
1033	swi	r16, r11, CC_R16
1034	swi	r17, r11, CC_R17
1035	swi	r18, r11, CC_R18
1036	/* save non-volatile registers */
1037	swi	r19, r11, CC_R19
1038	swi	r20, r11, CC_R20
1039	swi	r21, r11, CC_R21
1040	swi	r22, r11, CC_R22
1041	swi	r23, r11, CC_R23
1042	swi	r24, r11, CC_R24
1043	swi	r25, r11, CC_R25
1044	swi	r26, r11, CC_R26
1045	swi	r27, r11, CC_R27
1046	swi	r28, r11, CC_R28
1047	swi	r29, r11, CC_R29
1048	swi	r30, r11, CC_R30
	/* special purpose registers; each mfs is followed by a nop to
	 * cover the special-register access latency */
1049	/* special purpose registers */
1050	mfs	r12, rmsr
1051	nop
1052	swi	r12, r11, CC_MSR
1053	mfs	r12, rear
1054	nop
1055	swi	r12, r11, CC_EAR
1056	mfs	r12, resr
1057	nop
1058	swi	r12, r11, CC_ESR
1059	mfs	r12, rfsr
1060	nop
1061	swi	r12, r11, CC_FSR
1062
1063	/* update r31, the current */
1064	lwi	r31, r6, TI_TASK/* give me pointer to task which will be next */
1065	/* stored it to current_save too */
1066	swi	r31, r0, PER_CPU(CURRENT_SAVE)
1067
1068	/* get new process' cpu context and restore */
1069	/* give me start where start context of next task */
1070	addik	r11, r6, TI_CPU_CONTEXT
1071
1072	/* non-volatile registers */
1073	lwi	r30, r11, CC_R30
1074	lwi	r29, r11, CC_R29
1075	lwi	r28, r11, CC_R28
1076	lwi	r27, r11, CC_R27
1077	lwi	r26, r11, CC_R26
1078	lwi	r25, r11, CC_R25
1079	lwi	r24, r11, CC_R24
1080	lwi	r23, r11, CC_R23
1081	lwi	r22, r11, CC_R22
1082	lwi	r21, r11, CC_R21
1083	lwi	r20, r11, CC_R20
1084	lwi	r19, r11, CC_R19
1085	/* dedicated registers */
1086	lwi	r18, r11, CC_R18
1087	lwi	r17, r11, CC_R17
1088	lwi	r16, r11, CC_R16
1089	lwi	r15, r11, CC_R15
1090	lwi	r14, r11, CC_R14
1091	lwi	r13, r11, CC_R13
1092	/* skip volatile registers */
1093	lwi	r2, r11, CC_R2
1094	lwi	r1, r11, CC_R1
1095
	/* special purpose registers; note only FSR and MSR are restored
	 * here (EAR/ESR were saved above but are not reloaded).  MSR is
	 * restored last so the new task resumes with its own flags. */
1096	/* special purpose registers */
1097	lwi	r12, r11, CC_FSR
1098	mts	rfsr, r12
1099	nop
1100	lwi	r12, r11, CC_MSR
1101	mts	rmsr, r12
1102	nop
1103
	/* return through the NEW task's saved r15 (link register),
	 * i.e. we resume wherever the incoming task last called
	 * _switch_to() from; rtsd executes the delay-slot nop. */
1104	rtsd	r15, 8
1105	nop
1106
/*
 * _reset - software reset entry: branch to the FS-BOOT bootloader,
 * which lives at fixed physical address 0x70 (matches the reset
 * vector in the .init.ivt table below).
 */
1107ENTRY(_reset)
1108	brai	0x70; /* Jump back to FS-boot */
1109
/*
 * _break - break/NMI trap handler of last resort (installed in the
 * vector table below).  Dumps MSR and ESR to fixed scratch slots in
 * r0_ram (physical offsets 0x250 / 0x254) so they can be inspected
 * post-mortem, then parks the CPU in a tight branch-to-self loop.
 * Clobbers r5.
 */
1110ENTRY(_break)
1111	mfs	r5, rmsr
1112	nop
1113	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
1114	mfs	r5, resr
1115	nop
1116	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
1117	bri	0
1118
	/* Interrupt/exception vector table.  These are compiled and
	 * loaded into high memory, then copied into place (the CPU's
	 * vector area at physical 0x0) in mach_early_setup.  Each entry
	 * is an absolute branch to the physical address of its handler. */
1121	.section	.init.ivt, "ax"
1122	.org	0x0
1123	/* this is very important - here is the reset vector */
1124	/* in current MMU branch you don't care what is here - it is
1125	 * used from bootloader site - but this is correct for FS-BOOT */
1126	brai	0x70
1127	nop
1128	brai	TOPHYS(_user_exception); /* syscall handler */
1129	brai	TOPHYS(_interrupt);	/* Interrupt handler */
1130	brai	TOPHYS(_break);		/* nmi trap handler */
1131	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
1132
	/* debug vector sits at fixed offset 0x60 in the vector area */
1133	.org	0x60
1134	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
1135
	/* System call dispatch table: read-only array of handler
	 * addresses pulled in from syscall_table.S (which is expected
	 * to define the sys_call_table label -- TODO confirm). */
1136.section .rodata,"a"
1137#include "syscall_table.S"
1138
	/* size in bytes of the table, derived from the current
	 * location counter minus the table's start label */
1139syscall_table_size=(.-sys_call_table)
1140
1141