xref: /openbmc/linux/arch/microblaze/kernel/entry.S (revision 88707ebe77e23e856981e597f322cabbf6415662)
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002	NEC Corporation
8 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33#include <asm/mmu.h>
34
#undef DEBUG

#ifdef DEBUG
/* Create space for syscalls counting. */
/* One 32-bit counter per syscall; the trap handler uses entry 0 as the
 * running total of all syscalls (see the DEBUG block in _user_exception). */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */

/* Declare a global, 4-byte-aligned entry point named `name`. */
#define C_ENTRY(name)	.globl name; .align 4; name
47
48/*
49 * Various ways of setting and clearing BIP in flags reg.
50 * This is mucky, but necessary using microblaze version that
51 * allows msr ops to write to BIP
52 */
/* First branch: CPU supports msrset/msrclr, which update MSR atomically and
 * discard the old value into r0, so no scratch register is needed.
 * Second branch: emulate via mfs/mts through r11 — these variants CLOBBER r11,
 * which is why some callers spill r11 around them (see unaligned_data_trap). */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	.endm
#endif
158
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
/* rted returns to the local label with the MSR mode switch taking effect;
 * the nop fills the delay slot. */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
/* Continues at the PHYSICAL address of the local label (TOPHYS). */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:

/* Dump the whole general-purpose register file plus MSR into the pt_regs
 * frame that r1 already points to.  r1 itself (SP) and r0 are not stored
 * here; callers store PT_R1/PT_R0/PT_MODE themselves as appropriate. */
#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */			\
	swi	r3, r1, PT_R3;					\
	swi	r4, r1, PT_R4;					\
	swi	r5, r1, PT_R5;					\
	swi	r6, r1, PT_R6;					\
	swi	r7, r1, PT_R7;					\
	swi	r8, r1, PT_R8;					\
	swi	r9, r1, PT_R9;					\
	swi	r10, r1, PT_R10;					\
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PT_R12;					\
	swi	r13, r1, PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PT_R15;	/* Save LP */			\
	swi	r16, r1, PT_R16;					\
	swi	r17, r1, PT_R17;					\
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PT_R19;					\
	swi	r20, r1, PT_R20;					\
	swi	r21, r1, PT_R21;					\
	swi	r22, r1, PT_R22;					\
	swi	r23, r1, PT_R23;					\
	swi	r24, r1, PT_R24;					\
	swi	r25, r1, PT_R25;					\
	swi	r26, r1, PT_R26;					\
	swi	r27, r1, PT_R27;					\
	swi	r28, r1, PT_R28;					\
	swi	r29, r1, PT_R29;					\
	swi	r30, r1, PT_R30;					\
	swi	r31, r1, PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	swi	r11, r1, PT_MSR;
211
/* Reload the general-purpose registers from the pt_regs frame at r1.
 * Does not touch MSR or r1 itself. */
#define RESTORE_REGS_GP \
	lwi	r2, r1, PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PT_R3;					\
	lwi	r4, r1, PT_R4;					\
	lwi	r5, r1, PT_R5;					\
	lwi	r6, r1, PT_R6;					\
	lwi	r7, r1, PT_R7;					\
	lwi	r8, r1, PT_R8;					\
	lwi	r9, r1, PT_R9;					\
	lwi	r10, r1, PT_R10;					\
	lwi	r11, r1, PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PT_R12;					\
	lwi	r13, r1, PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PT_R15;	/* restore LP */		\
	lwi	r16, r1, PT_R16;					\
	lwi	r17, r1, PT_R17;					\
	lwi	r18, r1, PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PT_R19;					\
	lwi	r20, r1, PT_R20;					\
	lwi	r21, r1, PT_R21;					\
	lwi	r22, r1, PT_R22;					\
	lwi	r23, r1, PT_R23;					\
	lwi	r24, r1, PT_R24;					\
	lwi	r25, r1, PT_R25;					\
	lwi	r26, r1, PT_R26;					\
	lwi	r27, r1, PT_R27;					\
	lwi	r28, r1, PT_R28;					\
	lwi	r29, r1, PT_R29;					\
	lwi	r30, r1, PT_R30;					\
	lwi	r31, r1, PT_R31;	/* Restore cur task reg */

/* Restore MSR exactly as saved, then the GP registers. */
#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR;					\
	mts	rmsr , r11;						\
	RESTORE_REGS_GP

/* Variant used before returning via rtbd: restore the saved MSR but force
 * EIP clear and EE|BIP set, matching the state rtbd expects to unwind. */
#define RESTORE_REGS_RTBD \
	lwi	r11, r1, PT_MSR;					\
	andni	r11, r11, MSR_EIP;          /* clear EIP */             \
	ori	r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */        \
	mts	rmsr , r11;						\
	RESTORE_REGS_GP
255
/* Full entry-path state save used by the HW exception handlers.
 * Runs with r1 in physical-address mode.  Picks the kernel stack (either the
 * current one, if the trap came from kernel mode, or the task's kernel stack
 * from thread_info), carves a pt_regs frame, and runs SAVE_REGS.
 * PT_MODE records where we came from: nonzero (the old SP) = kernel mode,
 * zero = user mode.  Clobbers r11; leaves CURRENT_TASK (r31) loaded. */
#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;						\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PT_MODE; 	 				\
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */			\
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PT_R1; /* Store user SP.  */		\
	swi	r0, r1, PT_MODE; /* Was in user-mode.  */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums; 							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
288
289.text
290
291.extern cpuinfo
292
/* Flush the entire data cache, one cache line per wdc.flush, using the
 * cache size (CI_DCS) and line length (CI_DCL) published in cpuinfo.
 * Saves/restores the full register frame so it is safe to call from the
 * MB-manager break path. */
C_ENTRY(mb_flush_dcache):
	addik	r1, r1, -PT_SIZE
	SAVE_REGS

	addik	r3, r0, cpuinfo
	lwi	r7, r3, CI_DCS		/* r7 = data cache size */
	lwi	r8, r3, CI_DCL		/* r8 = data cache line length */
	sub	r9, r7, r8		/* r9 = loop cursor over the cache */
1:
	wdc.flush r9, r0
	bgtid	r9, 1b			/* loop while r9 > 0 ... */
	addk	r9, r9, r8		/* ... stepping one line in the delay slot */

	RESTORE_REGS
	addik	r1, r1, PT_SIZE
	rtsd	r15, 8
	nop
310
/* Invalidate the entire instruction cache line by line (wic), mirroring
 * mb_flush_dcache but driven by the i-cache size/line-length cpuinfo fields. */
C_ENTRY(mb_invalidate_icache):
	addik	r1, r1, -PT_SIZE
	SAVE_REGS

	addik	r3, r0, cpuinfo
	lwi	r7, r3, CI_ICS		/* r7 = instruction cache size */
	lwi	r8, r3, CI_ICL		/* r8 = instruction cache line length */
	sub	r9, r7, r8		/* r9 = loop cursor over the cache */
1:
	wic 	r9, r0
	bgtid	r9, 1b			/* loop while r9 > 0 ... */
	addk	r9, r9, r8		/* ... stepping one line in the delay slot */

	RESTORE_REGS
	addik	r1, r1, PT_SIZE
	rtsd	r15, 8
	nop
328
329/*
330 * User trap.
331 *
332 * System calls are handled here.
333 *
334 * Syscall protocol:
335 * Syscall number in r12, args in r5-r10
336 * Return value in r3
337 *
338 * Trap entered via brki instruction, so BIP bit is set, and interrupts
339 * are masked. This is nice, means we don't have to CLI before state save
340 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack.  */
	SAVE_REGS
	swi	r0, r1, PT_R3	/* preset return value slots to 0 */
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP.  */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f			/* no tracing work -> dispatch directly */

	/* Syscall tracing: report entry, then reload (possibly rewritten) args */
	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0		/* arg: &regs->r0 (delay slot) */

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	blti	r12, 5f
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;
	addi	r30, r0, 1			/* restarts allowed */

#ifdef DEBUG
	/* Trace syscalls and store counts in syscall_debug_table */
	/* The first syscall location stores total syscall number */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	braid	ret_from_trap
	addi	r3, r0, -ENOSYS;
430
431/* Entry point used to return from a syscall/trap */
432/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PT_R3	/* store syscall return value into regs */
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0		/* arg: &regs->r0 (delay slot) */
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b			/* re-check the work flags */

	/* Maybe handle a signal */
5:
	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* Signals to handle, handle them */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	add	r6, r30, r0;		/* Arg 2: int in_syscall (r30 = restart flag) */
	add	r30, r0, r0		/* no more restarts */
	bri	1b

/* Finally, return to user state.  */
4:	set_bip;			/*  Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	set_bip;			/*  Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
495
496
/* This is the initial entry point for a new child thread, with an appropriate
498   stack in place that makes it look like the child is in the middle of a
499   syscall.  This function is actually `returned to' from switch_thread
500   (copy_thread makes ret_from_fork the return address in each new thread's
501   saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
508
/* Like ret_from_fork, but first invoke the thread function (stashed in r20
 * by copy_thread) with its argument from r19, then take the normal trap
 * return path with a 0 "syscall" result. */
C_ENTRY(ret_from_kernel_thread):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brald	r15, r20	/* fn was left in r20 */
	addk	r5, r0, r19	/* ... and argument - in r19 */
	brid	ret_from_trap
	add	r3, r0, r0
517
/* Tail-call sys_rt_sigreturn with regs as its argument; clear the r30
 * restart flag so the signal-return path never restarts the "syscall". */
C_ENTRY(sys_rt_sigreturn_wrapper):
	addik	r30, r0, 0		/* no restarts */
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;		/* add user context as 1st arg */
522
/*
 * HW EXCEPTION routine start
 */
/* Generic HW exception entry: save state, then hand off to the C handler
 * full_exception(regs, esr, fsr) with the return path set to ret_from_exc. */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr		/* arg 2: exception status register */
	mfs	r7, rfsr;		/* save FSR (arg 3) */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception	/* enter handler in virtual mode */
	addik	r5, r1, 0		 /* parameter struct pt_regs * regs */
543
544/*
545 * Unaligned data trap.
546 *
547 * Unaligned data trap last on 4k page is handled here.
548 *
549 * Trap entered via exception, so EE bit is set, and interrupts
550 * are masked.  This is nice, means we don't have to CLI before state save
551 *
552 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
553 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;        /* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception	/* enter handler in virtual mode */
	addik	r7, r1, 0		/* parameter struct pt_regs * regs */
576
577/*
578 * Page fault traps.
579 *
580 * If the real exception handler (from hw_exception_handler.S) didn't find
581 * the mapping for the process, then we're thrown here to handle such situation.
582 *
583 * Trap entered via exceptions, so EE bit is set, and interrupts
584 * are masked.  This is nice, means we don't have to CLI before state save
585 *
586 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
587 * will bail out to this point if they can't resolve the lightweight TLB fault.
588 *
589 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
590 * void do_page_fault(struct pt_regs *regs,
591 *				unsigned long address,
592 *				unsigned long error_code)
593 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault	/* enter C handler in virtual mode */
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */
606
/* Instruction-side page fault: same as page_fault_data_trap, but the
 * error_code passed to do_page_fault is hard-wired to 0. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault	/* enter C handler in virtual mode */
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */
618
619/* Entry point used to return from an exception.  */
/* Common return path for HW exceptions: for user-mode returns, loop over
 * reschedule/signal work before restoring state; kernel-mode returns skip
 * straight to the restore. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b			/* re-check the work flags */

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* Signals to handle, handle them */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_notify_resume returns. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state.  */
4:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
681
/*
 * HW EXCEPTION routine end
 */
685
686/*
687 * Hardware maskable interrupts.
688 *
689 * The stack-pointer (r1) should have already been saved to the memory
690 * location PER_CPU(ENTRY_SP).
691 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;	/* return straight to ret_from_irq */
irq_call:rtbd	r0, do_IRQ;		/* enter do_IRQ in virtual mode */
	addik	r5, r1, 0;		/* arg: struct pt_regs * (delay slot) */

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;		/* kernel-mode return */

1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */
	bri	1b			/* re-check the work flags */

    /* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addik	r5, r1, 0; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0; /* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* restore user SP */
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPTION
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;		/* preemption disabled -> just restore */

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

	/* interrupts are off that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
794
#ifdef CONFIG_MB_MANAGER

/* Extra slots appended past the pt_regs frame by _xmb_manager_break for the
 * special-purpose registers and the saved TLB entries. */
#define	PT_PID		PT_SIZE
#define	PT_TLBI		PT_SIZE + 4
#define	PT_ZPR		PT_SIZE	+ 8
#define	PT_TLBL0	PT_SIZE + 12
#define	PT_TLBH0	PT_SIZE + 16

/* TMR recovery entry: rebuild CPU state (MSR, SPRs, TLBs) from the frame
 * that _xmb_manager_break stashed at xmb_manager_stackpointer, then invoke
 * the registered reset callback. */
C_ENTRY(_xtmr_manager_reset):
	lwi	r1, r0, xmb_manager_stackpointer

	/* Restore MSR */
	lwi	r2, r1, PT_MSR
	mts	rmsr, r2
	bri	4	/* hop to next insn so the MSR write settles */

	/* restore Special purpose registers */
	lwi	r2, r1, PT_PID
	mts	rpid, r2

	lwi	r2, r1, PT_TLBI
	mts	rtlbx, r2

	lwi	r2, r1, PT_ZPR
	mts	rzpr, r2

#if CONFIG_XILINX_MICROBLAZE0_USE_FPU
	lwi	r2, r1, PT_FSR
	mts	rfsr, r2
#endif

	/* restore all the tlb's */
	addik	r3, r0, TOPHYS(tlb_skip)
	addik	r6, r0, PT_TLBL0
	addik	r7, r0, PT_TLBH0
restore_tlb:
	add	r6, r6, r1
	add	r7, r7, r1
	lwi	r2, r6, 0
	mts 	rtlblo, r2
	lwi	r2, r7, 0
	mts	rtlbhi, r2
	addik	r6, r6, 4
	addik	r7, r7, 4
	bgtid	r3, restore_tlb
	addik	r3, r3, -1	/* one TLB entry per pass (delay slot) */

	lwi  	r5, r0, TOPHYS(xmb_manager_dev)		/* arg 1 for callback */
	lwi	r8, r0, TOPHYS(xmb_manager_reset_callback)
	set_vms
	/* return from reset need -8 to adjust for rtsd r15, 8 */
	addik   r15, r0, ret_from_reset - 8
	rtbd	r8, 0
	nop

ret_from_reset:
	set_bip /* Ints masked for state restore */
	VM_OFF
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PT_R14
	lwi	r16, r1, PT_PC
	addik	r1, r1, PT_SIZE + 36	/* pop pt_regs + SPR/TLB save area */
	rtbd	r16, 0
	nop
860
861/*
862 * Break handler for MB Manager. Enter to _xmb_manager_break by
863 * injecting fault in one of the TMR Microblaze core.
864 * FIXME: This break handler supports getting
865 * called from kernel space only.
866 */
C_ENTRY(_xmb_manager_break):
	/*
	 * Reserve memory in the stack for context store/restore
	 * (which includes memory for storing tlbs (max two tlbs))
	 */
	addik	r1, r1, -PT_SIZE - 36
	swi	r1, r0, xmb_manager_stackpointer	/* publish frame for reset path */
	SAVE_REGS
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC; /* PC and r16 are the same */

	lwi	r6, r0, TOPHYS(xmb_manager_baseaddr)
	lwi	r7, r0, TOPHYS(xmb_manager_crval)
	/*
	 * When the break vector gets asserted because of error injection,
	 * the break signal must be blocked before exiting from the
	 * break handler, below code configures the tmr manager
	 * control register to block break signal.
	 */
	swi	r7, r6, 0

	/* Save the special purpose registers  */
	mfs	r2, rpid
	swi	r2, r1, PT_PID

	mfs	r2, rtlbx
	swi	r2, r1, PT_TLBI

	mfs	r2, rzpr
	swi	r2, r1, PT_ZPR

#if CONFIG_XILINX_MICROBLAZE0_USE_FPU
	mfs	r2, rfsr
	swi	r2, r1, PT_FSR
#endif
	mfs	r2, rmsr
	swi	r2, r1, PT_MSR

	/* Save all the tlb's */
	addik	r3, r0, TOPHYS(tlb_skip)
	addik	r6, r0, PT_TLBL0
	addik	r7, r0, PT_TLBH0
save_tlb:
	add	r6, r6, r1
	add	r7, r7, r1
	mfs	r2, rtlblo
	swi	r2, r6, 0
	mfs	r2, rtlbhi
	swi	r2, r7, 0
	addik	r6, r6, 4
	addik	r7, r7, 4
	bgtid	r3, save_tlb
	addik	r3, r3, -1	/* one TLB entry per pass (delay slot) */

	lwi  	r5, r0, TOPHYS(xmb_manager_dev)		/* arg 1 for callback */
	lwi	r8, r0, TOPHYS(xmb_manager_callback)
	/* return from break need -8 to adjust for rtsd r15, 8 */
	addik   r15, r0, ret_from_break - 8
	rtbd	r8, 0
	nop

ret_from_break:
	/* flush the d-cache */
	bralid	r15, mb_flush_dcache
	nop

	/*
	 * To make sure microblaze i-cache is in a proper state
	 * invalidate the i-cache.
	 */
	bralid	r15, mb_invalidate_icache
	nop

	set_bip; /* Ints masked for state restore */
	VM_OFF;
	mbar	1	/* data memory barrier */
	mbar	2	/* instruction memory barrier */
	bri	4
	suspend		/* park this core until the TMR reset wakes it */
	nop
#endif
948
949/*
950 * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
951 * and call handling function with saved pt_regs
952 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/

	/* BIP bit is set on entry, no interrupts can occur */
	addik   r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC; /* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* stack pointer is in physical address as it is decreased
	 * by PT_SIZE but we need to get correct R1 value */
	addik   r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0 /* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call; /* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0	/* deliberate hang: no handler without KGDB */

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack.  */
	SAVE_REGS;
	swi	r16, r1, PT_PC;	/* Save LP */
	swi	r0, r1, PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1; /* Store user SP.  */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, 0;	/* arg: struct pt_regs * */
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
1:
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b			/* re-check the work flags */

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* Signals to handle, handle them */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi  r6, r0, 0;	/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state.  */
4:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	addik	r1, r1, PT_SIZE	 /* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;
1067
1068
/*
 * _switch_to - switch CPU context between two tasks.
 *
 * In:  r5 = thread_info of the outgoing (previous) task
 *      r6 = thread_info of the incoming (next) task
 *           (register roles evidenced by the TI_CPU_CONTEXT/TI_TASK
 *           accesses below)
 * Out: r3 = previous task pointer (CURRENT_TASK as it was on entry)
 *
 * Saves the callee-visible register state of the outgoing task into
 * its thread_info cpu_context, updates CURRENT_TASK (r31) and the
 * per-CPU CURRENT_SAVE slot, then restores the incoming task's
 * context and returns on its stack.
 */
ENTRY(_switch_to)
	/* prepare return value */
	/* must be read before CURRENT_TASK is overwritten below */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	/* NOTE(review): only FSR and MSR are restored; EAR/ESR (saved
	 * above) appear to be exception-state snapshots only - confirm */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12

	/* return via the incoming task's saved link register */
	rtsd	r15, 8
	nop
1152
#ifdef CONFIG_MB_MANAGER
/*
 * MicroBlaze manager state, one 32-bit word per item, filled in by
 * xmb_manager_register() below and consumed by the break handler.
 */
.section .data
.global xmb_manager_dev
.global xmb_manager_baseaddr
.global xmb_manager_crval
.global xmb_manager_callback
.global xmb_manager_reset_callback
.global xmb_manager_stackpointer
.align 4
xmb_manager_dev:		/* device pointer, arg r8 of xmb_manager_register */
	.long 0
xmb_manager_baseaddr:		/* manager base address, arg r5 */
	.long 0
xmb_manager_crval:		/* control register value, arg r6 */
	.long 0
xmb_manager_callback:		/* error-count callback, arg r7 */
	.long 0
xmb_manager_reset_callback:	/* reset callback, arg r9 */
	.long 0
xmb_manager_stackpointer:	/* not written here; presumably saved by the break handler - confirm */
	.long 0
1174
1175/*
1176 * When the break vector gets asserted because of error injection,
1177 * the break signal must be blocked before exiting from the
1178 * break handler, Below api updates the manager address and
1179 * control register and error count callback arguments,
1180 * which will be used by the break handler to block the
1181 * break and call the callback function.
1182 */
.global xmb_manager_register
.section .text
.align 2
.ent xmb_manager_register
.type xmb_manager_register, @function
/*
 * xmb_manager_register(baseaddr, crval, callback, dev, reset_callback)
 *
 * Records the five arguments (MicroBlaze ABI argument registers
 * r5..r9) into the xmb_manager_* data words so the break handler can
 * later block the break signal and invoke the callback.  Pure stores;
 * no registers are modified.
 */
xmb_manager_register:
	swi	r5, r0, xmb_manager_baseaddr
	swi	r6, r0, xmb_manager_crval
	swi	r7, r0, xmb_manager_callback
	swi	r8, r0, xmb_manager_dev
	swi	r9, r0, xmb_manager_reset_callback

	rtsd	r15, 8;
	nop;				/* delay slot */
.end xmb_manager_register
1198#endif
1199
/*
 * _reset - leave virtual mode and branch to the reset vector at
 * address 0 (the first entry of the vector table below).
 */
ENTRY(_reset)
	VM_OFF
	brai	0; /* Jump to reset vector */
1203
	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	/* Vector layout: one 8-byte slot (brai + padding) per vector at
	 * fixed offsets 0x0/0x8/0x10/0x18/0x20. All targets are branched
	 * to via their physical addresses (TOPHYS). */
	.section	.init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR && !defined(CONFIG_MB_MANAGER)
	.org	0x0
	brai	CONFIG_MANUAL_RESET_VECTOR	/* board-specific reset address */
#elif defined(CONFIG_MB_MANAGER)
	.org	0x0
	brai	TOPHYS(_xtmr_manager_reset);	/* TMR manager reset handler */
#endif
	.org	0x8
	brai	TOPHYS(_user_exception); /* syscall handler */
	.org	0x10
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
#ifdef CONFIG_MB_MANAGER
	.org	0x18
	brai	TOPHYS(_xmb_manager_break);	/* microblaze manager break handler */
#else
	.org	0x18
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
#endif
	.org	0x20
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
1227
.section .rodata,"a"
/* sys_call_table lives in the included file */
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)	/* table size in bytes */

/*
 * NUL-terminated labels displayed by the stack unwinder via the
 * microblaze_trap_handlers table below.
 */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	/* NOTE(review): leading space looks unintentional - confirm
	 * before changing, as this string is user-visible unwinder
	 * output. */
	.ascii " SYSCALL (PREEMPTED)\0"
1241
1242	/*
1243	 * Trap decoding for stack unwinder
1244	 * Tuples are (start addr, end addr, string)
1245	 * If return address lies on [start addr, end addr],
1246	 * unwinder displays 'string'
1247	 */
1248
	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	/* start == end: return address must equal the label exactly */
	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
	/* Fuzzy matches go here */
	/* start < end: return address anywhere in the range matches */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table */
	/* all-zero sentinel terminates the scan */
	.word 0               ; .word 0               ; .word 0
1260