/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes of space for args */
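
/* A state save frame thus grows down from the kernel stack top:
 * STATE_SAVE_ARG_SPACE bytes of argument space sit at the bottom (where
 * r1 points after entry), with the struct pt_regs at offset PTO above it. */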

#define C_ENTRY(name)	.globl name; .align 4; name
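/* C_ENTRY(foo) followed by ':' at the use site expands to a global,
 * 4-byte-aligned entry label 'foo:'. */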

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows msr ops to write to BIP.
 */
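/* Note: msrset/msrclr also write the pre-modification MSR value to their
 * destination register, which is why r11 is clobbered by these macros. */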
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

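/* MSR_UMS/MSR_VMS are the "save" copies of the UM/VM mode bits: an
 * rted/rtid/rtbd return copies UMS->UM and VMS->VM, so a mode change
 * programmed via set_ums/set_vms/clear_vms_ums only takes effect at the
 * branch target. */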
/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;		\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;			\
	rted	r0, TOPHYS(1f);	\
1: nop;

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

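/* Note: SAVE_REGS deliberately skips r1, r3, r4, r16 and r17 -- r1 is
 * stored as PT_R1 by each entry path, r3/r4 (return values) are saved
 * separately where a path needs them, and r17 is stored over PT_PC by the
 * exception entries (see SAVE_STATE below). */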
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

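/* Note: as with SAVE_REGS, r1 and r3/r4 are not touched here; each return
 * path reloads the stack pointer and the return values itself. */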
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so the BIP bit is set and interrupts
 * are masked; this is nice, as it means we don't have to CLI before the
 * state save.
 */
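/* For reference, a minimal user-side invocation sketch (the ivt below
 * wires vector 0x08 to _user_exception; __NR_write is just an example
 * number):
 *	addik	r12, r0, __NR_write	 (syscall number)
 *	...				 (args in r5..r10)
 *	brki	r14, 0x08		 (trap; r14 = address of the brki)
 *	...				 (result in r3)
 */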
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1; 		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save.  */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from thread_info (THREAD_SIZE is 8k) */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return: -8 is needed to adjust for 'rtsd r15, 8' */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where the called function should
 * return.  [note that MAKE_SYS_CALL uses label 1] */

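/* 'rtsd r15, 8' returns to r15 + 8 (skipping a branch and its delay
 * slot), so loading r15 with (target - 8) makes a handler's ordinary
 * subroutine return land exactly on 'target'. */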
	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	add	r11, r0, CURRENT_TASK	 /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;
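	/* The two adds above compute r12 = num * 4, i.e. the byte offset
	 * of the handler's 4-byte pointer in sys_call_table, without
	 * relying on the (optional) barrel shifter. */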

#ifdef DEBUG
	/* Trace syscalls and store the counts in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return: -8 adjusts for 'rtsd r15, 8' */
	la	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable the BIP bit before the state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/*  Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	# FIXME: Restructure all these flag checks.
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4
	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
	lwi	r3, r1, PTO + PT_R3
	lwi	r4, r1, PTO + PT_R4
1:

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending, skip */

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
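/* rtbd returns through r14 (the PC restored by RESTORE_REGS), executes
 * the delay-slot nop, and clears the BIP bit that set_bip raised above. */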
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a trap */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/* equalize initial state for all possible entries */\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;		/* Jump ahead if coming from user */\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because			\
	 * this macro is used by other exceptions */			\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
	swi	r11, r1, PTO+PT_MODE; 	 				\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because this macro	\
	 * is used by other exceptions */				\
	swi	r3, r1, PTO + PT_R3; 					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	/* Clear the syscall number (this is not a syscall).  */	\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)

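/* Hardware exceptions deliver their return address in r17 (interrupts use
 * r14, breaks r16), which is why SAVE_STATE overwrites the PT_PC slot
 * written from r14 by SAVE_REGS with r17 afterwards. */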
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * to find where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return: -8 adjusts for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * An unaligned data trap at the end of a 4k page is handled here.
 *
 * Trap entered via exception, so the EE bit is set and interrupts
 * are masked; this means we don't have to CLI before the state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return: -8 adjusts for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so the EE bit is set and interrupts
 * are masked; this means we don't have to CLI before the state save.
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return: -8 adjusts for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return: -8 adjusts for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	set_bip;			/*  Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an exception */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f; /* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 in the next stack frame */
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this macro is used by other exceptions */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* store mode (user) */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
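/* do_IRQ returns with 'rtsd r15, 8', i.e. to irq_call + 8, which is
 * ret_from_irq below. */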
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
	add	r11, r0, CURRENT_TASK;
	swi	r11, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
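/* rtid returns through r14 and re-enables MSR[IE], undoing the
 * disable_irq above once the context has been restored. */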
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.  However, we wait to save state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
	/* Kernel-mode state save.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:      /* User-mode state save.  */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	/* Clear the syscall number (this is not a syscall).  */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		     /* send the trap signal */
	add	r6, r0, CURRENT_TASK;	     /* Arg 2: current task */
	addk	r7, r0, r0		     /* 3rd param zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
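/* same return-linkage trick as irq_call above: send_sig's
 * 'rtsd r15, 8' comes back to dbtrap_call + 8 */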
dbtrap_call:	rtbd	r11, 0;
	nop;

	set_bip;			/*  Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending, skip */

/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;


/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a debug trap */
	nop;


ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, r31
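	/* r5 = previous task's thread_info, r6 = next task's thread_info;
	 * the outgoing task (r31) is handed back to the caller in r3 */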

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK /* get pointer to the task which will be next */
	/* store it to current_save too */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me the start of the next task's context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop
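	/* the rtsd above returns through the r15 just loaded from the next
	 * task's cpu_context, resuming at that task's own _switch_to call
	 * site */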

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
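	/* Hardware vector layout (each slot is 8 bytes, so the assembler
	 * may pair each brai below with an imm prefix): 0x00 reset,
	 * 0x08 user vector (syscall), 0x10 interrupt, 0x18 break/NMI,
	 * 0x20 HW exception, 0x60 debug. */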
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)
