/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

/* The size of a state save frame. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes of space for args */
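/*
 * Resulting frame layout after "addik r1, r1, -STATE_SAVE_SIZE"
 * (descriptive note, derived from the definitions above):
 *
 *	r1 + 0		STATE_SAVE_ARG_SPACE bytes of argument space
 *	r1 + PTO	struct pt_regs (PT_SIZE bytes)
 *
 * which is why register slots are addressed as PTO + PT_xxx throughout.
 */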

#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows msr ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;		\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;			\
	rted	r0, TOPHYS(1f);	\
1: nop;
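/*
 * Note (based on the MicroBlaze MSR save bits): the rted/rtid/rtbd return
 * instructions copy MSR[UMS] into MSR[UM] and MSR[VMS] into MSR[VM] when
 * they branch, so the macros above only stage the new mode; the switch
 * itself happens at the rted in VM_ON/VM_OFF (or at the set_vms; rtbd/rtid
 * pairs used further down).
 */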

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
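/*
 * Illustrative sketch (an assumption, not part of this file): a user-space
 * call site is expected to look roughly like
 *
 *	addik	r12, r0, __NR_xxx	; syscall number
 *	brki	r14, 0x08		; trap through the 0x8 vector below
 *
 * with the result coming back in r3.  The "addi r14, r14, 4" below makes
 * the saved PC point past the brki on return.
 */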
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1; 		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save.  */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

	la	r15, r0, ret_from_trap-8
/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid.  */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,1f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

	/* Trace syscalls: keep a per-syscall counter in r0_ram */
	lwi	r3, r12, 0x400 + TOPHYS(r0_ram)
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + TOPHYS(r0_ram)

	lwi	r12, r12, TOPHYS(sys_call_table); /* Function ptr */
	/* Make the system call (function pointer is in r12) */
	set_vms;
	rtid	r12, 0;
	nop;
	/* The syscall number is invalid, return an error.  */
1:	VM_ON;	/* RETURN() expects virtual mode*/
	addi	r3, r0, -ENOSYS;
	rtsd	r15,8;		/* looks like a normal subroutine return */
	or 	r0, r0, r0


/* Entry point used to return from a syscall/trap.  */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */
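/*
 * Pattern used below: each wrapper materialises the extra argument the C
 * code wants (a struct pt_regs pointer built with "la rX, r1, PTO") and
 * then tail-calls the real implementation with brid, so it returns through
 * the r15 set up in _user_exception, i.e. straight to ret_from_trap.  The
 * two rt_sig* wrappers call with brlid instead and branch to ret_from_trap
 * explicitly.
 */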

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigsuspend_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r7, r1, PTO;		/* add user context as 3rd arg */
	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

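/*
 * Note on SAVE_STATE (descriptive): unlike the syscall path it also stores
 * r3/r4 up front, since the shared exception return path restores them, and
 * it overwrites the saved PC with r17, which is assumed here to hold the
 * hardware exception return address, rather than r14.
 */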
#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/*equalize initial state for all possible entries*/\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;		/* Jump ahead if coming from user */\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because			\
	 * this macro is used by other exceptions too */		\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
	swi	r11, r1, PTO+PT_MODE; 	 				\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because this macro	\
	 * is used by other exceptions too */				\
	swi	r3, r1, PTO + PT_R3; 					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	/* Save away the syscall number.  */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)

C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * so we can find where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
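/*
 * Note that ESR and EAR are handed over in r3/r4 rather than the usual C
 * argument registers; _unaligned_data_exception is an assembly routine
 * (see above), and only r7, the pt_regs pointer, follows the normal
 * convention.
 */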
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a case.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
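/*
 * Both entry points pass the faulting address from EAR in r6; the data-side
 * trap forwards ESR as error_code while the instruction-side trap passes 0,
 * which is presumably how fault.c tells the two cases apart.
 */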
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 * The return registers are stored separately because this code
	 * is shared with other exceptions. */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
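/*
 * Unlike the trap/exception entries above, which stash r11 in
 * r0_ram + PTO + PT_R11, the interrupt entry below parks r11 in
 * PER_CPU(R11_SAVE) until it is properly saved into the pt_regs frame.
 */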
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f; /* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this code is shared with other exceptions */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;
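/*
 * do_IRQ is entered through the rtbd above (which also switches us to
 * virtual mode).  Since r15 was pointed at irq_call, do_IRQ's normal
 * "rtsd r15, 8" return lands two instructions later, i.e. at ret_from_irq.
 */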

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

    /* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
	add	r11, r0, CURRENT_TASK;
	swi	r11, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  However, wait to save state first.
 */
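/*
 * In short (descriptive): the handler below builds the usual state-save
 * frame, calls send_sig(SIGTRAP, current, 0) through the dbtrap_call
 * trampoline, and then runs a resched/signal/return sequence analogous to
 * ret_from_trap.
 */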
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/*equalize initial state for all possible entries*/
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
	/* Kernel-mode state save.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:      /* User-mode state save.  */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	/* Save away the syscall number.  */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		     /* send the trap signal */
	add	r6, r0, CURRENT_TASK; /* Arg 2: current task */
	addk	r7, r0, r0		     /* 3rd param zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;

	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;


/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


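/*
 * Context switch (descriptive summary of the code below): r5 is the
 * previous task's thread_info, r6 the next task's thread_info; the
 * previous task pointer kept in r31 is returned in r3, and CURRENT_SAVE
 * is updated to point at the next task.
 */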
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK /* get pointer to the task which will be next */
	/* store it to current_save too */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler*/

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)