1/*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License.  See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2004 - 2008 by Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/linkage.h>
17#include <asm/asm-offsets.h>
18#include <asm/processor.h>
19#include <asm/coprocessor.h>
20#include <asm/thread_info.h>
21#include <asm/uaccess.h>
22#include <asm/unistd.h>
23#include <asm/ptrace.h>
24#include <asm/current.h>
25#include <asm/pgtable.h>
26#include <asm/page.h>
27#include <asm/signal.h>
28#include <asm/tlbflush.h>
29#include <variant/tie-asm.h>
30
31/* Unimplemented features. */
32
33#undef KERNEL_STACK_OVERFLOW_CHECK
34#undef PREEMPTIBLE_KERNEL
35#undef ALLOCA_EXCEPTION_IN_IRAM
36
37/* Not well tested.
38 *
39 * - fast_coprocessor
40 */
41
42/*
43 * Macro to find the first bit set in a WINDOWSTART-style mask from the left + 1
44 *
45 * 100....0 -> 1
46 * 010....0 -> 2
47 * 000....1 -> WSBITS
48 */
49
50	.macro ffs_ws bit mask
51
52#if XCHAL_HAVE_NSA
53	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
54	addi    \bit, \bit, WSBITS - 32 + 1   	# uppermost bit set -> return 1
55#else
56	movi    \bit, WSBITS
57#if WSBITS > 16
58	_bltui  \mask, 0x10000, 99f
59	addi    \bit, \bit, -16
60	extui   \mask, \mask, 16, 16
61#endif
62#if WSBITS > 8
6399:	_bltui  \mask, 0x100, 99f
64	addi    \bit, \bit, -8
65	srli    \mask, \mask, 8
66#endif
6799:	_bltui  \mask, 0x10, 99f
68	addi    \bit, \bit, -4
69	srli    \mask, \mask, 4
7099:	_bltui  \mask, 0x4, 99f
71	addi    \bit, \bit, -2
72	srli    \mask, \mask, 2
7399:	_bltui  \mask, 0x2, 99f
74	addi    \bit, \bit, -1
7599:
76
77#endif
78	.endm
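
/* For reference, a rough C model (not part of the build) of what ffs_ws
 * computes; WSBITS stands in for the configured number of WINDOWSTART bits:
 *
 *	static int ffs_ws(unsigned int mask)
 *	{
 *		int bit = 1;
 *		unsigned int top = 1u << (WSBITS - 1);
 *
 *		while ((mask & top) == 0) {	// assumes mask != 0
 *			top >>= 1;
 *			bit++;
 *		}
 *		return bit;	// 100..0 -> 1, 010..0 -> 2, 00..01 -> WSBITS
 *	}
 */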
79
80/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
81
82/*
83 * First-level exception handler for user exceptions.
84 * Save some special registers, extra states and all registers in the AR
85 * register file that were in use in the user task, and jump to the common
86 * exception code.
87 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
88 * save them for kernel exceptions).
89 *
90 * Entry condition for user_exception:
91 *
92 *   a0:	trashed, original value saved on stack (PT_AREG0)
93 *   a1:	a1
94 *   a2:	new stack pointer, original value in depc
95 *   a3:	dispatch table
96 *   depc:	a2, original value saved on stack (PT_DEPC)
97 *   excsave1:	a3
98 *
99 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
100 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
101 *
102 * Entry condition for _user_exception:
103 *
104 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
105 *   excsave has been restored, and
106 *   stack pointer (a1) has been set.
107 *
108 * Note: _user_exception might be at an odd address. Don't use call0..call12
109 */
110
111ENTRY(user_exception)
112
113	/* Save a1, a2, a3, and DEPC; restore excsave_1 and set the SP. */
114
115	xsr	a3, excsave1
116	rsr	a0, depc
117	s32i	a1, a2, PT_AREG1
118	s32i	a0, a2, PT_AREG2
119	s32i	a3, a2, PT_AREG3
120	mov	a1, a2
121
122	.globl _user_exception
123_user_exception:
124
125	/* Save SAR and turn off single stepping */
126
127	movi	a2, 0
128	rsr	a3, sar
129	xsr	a2, icountlevel
130	s32i	a3, a1, PT_SAR
131	s32i	a2, a1, PT_ICOUNTLEVEL
132
133#if XCHAL_HAVE_THREADPTR
134	rur	a2, threadptr
135	s32i	a2, a1, PT_THREADPTR
136#endif
137
138	/* Rotate ws so that the current windowbase is at bit0. */
139	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
140
141	rsr	a2, windowbase
142	rsr	a3, windowstart
143	ssr	a2
144	s32i	a2, a1, PT_WINDOWBASE
145	s32i	a3, a1, PT_WINDOWSTART
146	slli	a2, a3, 32-WSBITS
147	src	a2, a3, a2
148	srli	a2, a2, 32-WSBITS
149	s32i	a2, a1, PT_WMASK	# needed for restoring registers
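
	/* A hedged C model of the rotation above: treat WINDOWSTART as a
	 * WSBITS-wide ring and rotate it right by WINDOWBASE, so that the
	 * current frame's bit lands at bit 0:
	 *
	 *	static unsigned int rotate_ws_right(unsigned int ws,
	 *					    unsigned int wb)
	 *	{
	 *		unsigned int all = (1u << WSBITS) - 1; // WSBITS < 32
	 *
	 *		return ((ws >> wb) | (ws << (WSBITS - wb))) & all;
	 *	}
	 */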
150
151	/* Save only live registers. */
152
153	_bbsi.l	a2, 1, 1f
154	s32i	a4, a1, PT_AREG4
155	s32i	a5, a1, PT_AREG5
156	s32i	a6, a1, PT_AREG6
157	s32i	a7, a1, PT_AREG7
158	_bbsi.l	a2, 2, 1f
159	s32i	a8, a1, PT_AREG8
160	s32i	a9, a1, PT_AREG9
161	s32i	a10, a1, PT_AREG10
162	s32i	a11, a1, PT_AREG11
163	_bbsi.l	a2, 3, 1f
164	s32i	a12, a1, PT_AREG12
165	s32i	a13, a1, PT_AREG13
166	s32i	a14, a1, PT_AREG14
167	s32i	a15, a1, PT_AREG15
168	_bnei	a2, 1, 1f		# only one valid frame?
169
170	/* Only one valid frame, skip saving regs. */
171
172	j	2f
173
174	/* Save the remaining registers.
175	 * We have to save all registers up to the first '1' from
176	 * the right, except the current frame (bit 0).
177	 * Assume a2 is:  001001000110001
178	 * All register frames starting from the top field to the marked '1'
179	 * must be saved.
180	 */
181
1821:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
183	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
184	and	a3, a3, a2		# max. only one bit is set
185
186	/* Find number of frames to save */
187
188	ffs_ws	a0, a3			# number of frames to the '1' from left
189
190	/* Store information into WMASK:
191	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
192	 * bits 4...: number of valid 4-register frames
193	 */
194
195	slli	a3, a0, 4		# number of frames to save in bits 8..4
196	extui	a2, a2, 0, 4		# mask for the first 16 registers
197	or	a2, a3, a2
198	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
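
	/* In C terms (a sketch reusing the ffs_ws model above), WMASK is
	 * built from the rotated windowstart ws_rot roughly as:
	 *
	 *	next   = ws_rot & -(ws_rot - 1);  // next '1' above bit 0
	 *	frames = ffs_ws(next);            // 4-register frames to save
	 *	wmask  = (frames << 4) | (ws_rot & 0xf);
	 */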
199
200	/* Save 4 registers at a time */
201
2021:	rotw	-1
203	s32i	a0, a5, PT_AREG_END - 16
204	s32i	a1, a5, PT_AREG_END - 12
205	s32i	a2, a5, PT_AREG_END - 8
206	s32i	a3, a5, PT_AREG_END - 4
207	addi	a0, a4, -1
208	addi	a1, a5, -16
209	_bnez	a0, 1b
210
211	/* WINDOWBASE still in SAR! */
212
213	rsr	a2, sar			# original WINDOWBASE
214	movi	a3, 1
215	ssl	a2
216	sll	a3, a3
217	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
218	wsr	a2, windowbase		# and WINDOWBASE
219	rsync
220
221	/* We are back to the original stack pointer (a1) */
222
2232:	/* Now, jump to the common exception handler. */
224
225	j	common_exception
226
227ENDPROC(user_exception)
228
229/*
230 * First-level entry handler for kernel exceptions.
231 * Save special registers and the live window frame.
232 * Note: Even though we change the stack pointer, we don't have to do a
233 *	 MOVSP here, as we do that when we return from the exception.
234 *	 (See comment in the kernel exception exit code)
235 *
236 * Entry condition for kernel_exception:
237 *
238 *   a0:	trashed, original value saved on stack (PT_AREG0)
239 *   a1:	a1
240 *   a2:	new stack pointer, original in DEPC
241 *   a3:	dispatch table
242 *   depc:	a2, original value saved on stack (PT_DEPC)
243 *   excsave_1:	a3
244 *
245 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
246 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
247 *
248 * Entry condition for _kernel_exception:
249 *
250 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
251 *   excsave has been restored, and
252 *   stack pointer (a1) has been set.
253 *
254 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
255 */
256
257ENTRY(kernel_exception)
258
259	/* Save a1, a2, a3, and DEPC; restore excsave_1 and set the SP. */
260
261	xsr	a3, excsave1		# restore a3, excsave_1
262	rsr	a0, depc		# get a2
263	s32i	a1, a2, PT_AREG1
264	s32i	a0, a2, PT_AREG2
265	s32i	a3, a2, PT_AREG3
266	mov	a1, a2
267
268	.globl _kernel_exception
269_kernel_exception:
270
271	/* Save SAR and turn off single stepping */
272
273	movi	a2, 0
274	rsr	a3, sar
275	xsr	a2, icountlevel
276	s32i	a3, a1, PT_SAR
277	s32i	a2, a1, PT_ICOUNTLEVEL
278
279	/* Rotate ws so that the current windowbase is at bit0. */
280	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
281
282	rsr	a2, windowbase		# don't need to save these, we only
283	rsr	a3, windowstart		# need shifted windowstart: windowmask
284	ssr	a2
285	slli	a2, a3, 32-WSBITS
286	src	a2, a3, a2
287	srli	a2, a2, 32-WSBITS
288	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
289
290	/* Save only the live window-frame */
291
292	_bbsi.l	a2, 1, 1f
293	s32i	a4, a1, PT_AREG4
294	s32i	a5, a1, PT_AREG5
295	s32i	a6, a1, PT_AREG6
296	s32i	a7, a1, PT_AREG7
297	_bbsi.l	a2, 2, 1f
298	s32i	a8, a1, PT_AREG8
299	s32i	a9, a1, PT_AREG9
300	s32i	a10, a1, PT_AREG10
301	s32i	a11, a1, PT_AREG11
302	_bbsi.l	a2, 3, 1f
303	s32i	a12, a1, PT_AREG12
304	s32i	a13, a1, PT_AREG13
305	s32i	a14, a1, PT_AREG14
306	s32i	a15, a1, PT_AREG15
307
3081:
309
310#ifdef KERNEL_STACK_OVERFLOW_CHECK
311
312	/*  Stack overflow check, for debugging  */
313	extui	a2, a1, TASK_SIZE_BITS,XX
314	movi	a3, SIZE??
315	_bge	a2, a3, out_of_stack_panic
316
317#endif
318
319/*
320 * This is the common exception handler.
321 * We get here from the user exception handler or simply by falling through
322 * from the kernel exception handler.
323 * Save the remaining special registers, switch to kernel mode, and jump
324 * to the second-level exception handler.
325 *
326 */
327
328common_exception:
329
330	/* Save some registers, disable loops and clear the syscall flag. */
331
332	rsr	a2, debugcause
333	rsr	a3, epc1
334	s32i	a2, a1, PT_DEBUGCAUSE
335	s32i	a3, a1, PT_PC
336
337	movi	a2, -1
338	rsr	a3, excvaddr
339	s32i	a2, a1, PT_SYSCALL
340	movi	a2, 0
341	s32i	a3, a1, PT_EXCVADDR
342	xsr	a2, lcount
343	s32i	a2, a1, PT_LCOUNT
344
345	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
346
347	rsr	a0, exccause
348	movi	a3, 0
349	rsr	a2, excsave1
350	s32i	a0, a1, PT_EXCCAUSE
351	s32i	a3, a2, EXC_TABLE_FIXUP
352
353	/* All unrecoverable state is saved on the stack now, and a1 is valid,
354	 * so we can allow exceptions and interrupts (*) again.
355	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
356	 *
357	 * (*) We only allow interrupts if they were previously enabled and
358	 *     we're not handling an IRQ
359	 */
360
361	rsr	a3, ps
362	addi	a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
363	movi	a2, LOCKLEVEL
364	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
365					# a3 = PS.INTLEVEL
366	moveqz	a3, a2, a0		# a3 = LOCKLEVEL iff interrupt
367	movi	a2, 1 << PS_WOE_BIT
368	or	a3, a3, a2
369	rsr	a0, exccause
370	xsr	a3, ps
371
372	s32i	a3, a1, PT_PS		# save ps
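
	/* What the PS update above computes, as a hedged C sketch (the saved
	 * PT_PS value is the old PS that xsr swapped out):
	 *
	 *	static unsigned int new_ps(unsigned int old_ps,
	 *				   unsigned int cause)
	 *	{
	 *		unsigned int level = (old_ps >> PS_INTLEVEL_SHIFT) &
	 *				     ((1 << PS_INTLEVEL_WIDTH) - 1);
	 *
	 *		if (cause == EXCCAUSE_LEVEL1_INTERRUPT)
	 *			level = LOCKLEVEL; // stay masked inside an IRQ
	 *		return level | (1 << PS_WOE_BIT); // EXCM/UM/RING/OWB = 0
	 *	}
	 */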
373
374	/* Save lbeg, lend */
375
376	rsr	a2, lbeg
377	rsr	a3, lend
378	s32i	a2, a1, PT_LBEG
379	s32i	a3, a1, PT_LEND
380
381	/* Save SCOMPARE1 */
382
383#if XCHAL_HAVE_S32C1I
384	rsr     a2, scompare1
385	s32i    a2, a1, PT_SCOMPARE1
386#endif
387
388	/* Save optional registers. */
389
390	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
391
392#ifdef CONFIG_TRACE_IRQFLAGS
393	l32i	a4, a1, PT_DEPC
394	/* Double exception means we came here with an exception
395	 * while PS.EXCM was set, i.e. interrupts disabled.
396	 */
397	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
398	l32i	a4, a1, PT_EXCCAUSE
399	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
400	/* If we came here because of an interrupt, interrupts were enabled
401	 * and we've just disabled them.
402	 */
403	movi	a4, trace_hardirqs_off
404	callx4	a4
4051:
406#endif
407
408	/* Go to second-level dispatcher. Set up parameters to pass to the
409	 * exception handler and call the exception handler.
410	 */
411
412	movi	a4, exc_table
413	mov	a6, a1			# pass stack frame
414	mov	a7, a0			# pass EXCCAUSE
415	addx4	a4, a0, a4
416	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
417
418	/* Call the second-level handler */
419
420	callx4	a4
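
	/* The table lookup above corresponds to this C sketch (the handler
	 * type is illustrative, not a declared kernel typedef):
	 *
	 *	typedef void (*exc_handler_t)(struct pt_regs *regs,
	 *				      unsigned int cause);
	 *	exc_handler_t h = *(exc_handler_t *)((char *)exc_table +
	 *			exccause * 4 + EXC_TABLE_DEFAULT);
	 *	h(regs, exccause);
	 */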
421
422	/* Jump here for exception exit */
423	.global common_exception_return
424common_exception_return:
425
426#ifdef CONFIG_TRACE_IRQFLAGS
427	l32i	a4, a1, PT_DEPC
428	/* Double exception means we came here with an exception
429	 * while PS.EXCM was set, i.e. interrupts disabled.
430	 */
431	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
432	l32i	a4, a1, PT_EXCCAUSE
433	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
434	/* If we came here because of an interrupt, interrupts were enabled
435	 * and we'll reenable them on return.
436	 */
437	movi	a4, trace_hardirqs_on
438	callx4	a4
4391:
440#endif
441
442	/* Jump if we are returning from kernel exceptions. */
443
4441:	l32i	a3, a1, PT_PS
445	_bbci.l	a3, PS_UM_BIT, 4f
446
447	rsil	a2, 0
448
449	/* Specific to a user exception exit:
450	 * We need to check some flags for signal handling and rescheduling,
451	 * and have to restore WB and WS, extra states, and all registers
452	 * in the register file that were in use in the user task.
453	 * Note that we don't disable interrupts here.
454	 */
455
456	GET_THREAD_INFO(a2,a1)
457	l32i	a4, a2, TI_FLAGS
458
459	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
460	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
461	_bbci.l	a4, TIF_SIGPENDING, 5f
462
4632:	l32i	a4, a1, PT_DEPC
464	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
465
466	/* Call do_signal() */
467
468	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
469	mov	a6, a1
470	callx4	a4
471	j	1b
472
4733:	/* Reschedule */
474
475	movi	a4, schedule	# void schedule (void)
476	callx4	a4
477	j	1b
478
4795:
480#ifdef CONFIG_DEBUG_TLB_SANITY
481	l32i	a4, a1, PT_DEPC
482	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
483	movi	a4, check_tlb_sanity
484	callx4	a4
485#endif
4864:	/* Restore optional registers. */
487
488	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
489
490	/* Restore SCOMPARE1 */
491
492#if XCHAL_HAVE_S32C1I
493	l32i    a2, a1, PT_SCOMPARE1
494	wsr     a2, scompare1
495#endif
496	wsr	a3, ps		/* disable interrupts */
497
498	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
499
500user_exception_exit:
501
502	/* Restore the state of the task and return from the exception. */
503
504	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
505
506	l32i	a2, a1, PT_WINDOWBASE
507	l32i	a3, a1, PT_WINDOWSTART
508	wsr	a1, depc		# use DEPC as temp storage
509	wsr	a3, windowstart		# restore WINDOWSTART
510	ssr	a2			# preserve user's WB in the SAR
511	wsr	a2, windowbase		# switch to user's saved WB
512	rsync
513	rsr	a1, depc		# restore stack pointer
514	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
515	rotw	-1			# we restore a4..a7
516	_bltui	a6, 16, 1f		# only have to restore current window?
517
518	/* The working registers are a0 and a3.  We are restoring to
519	 * a4..a7.  Be careful not to destroy what we have just restored.
520	 * Note: wmask has the format YYYYM:
521	 *       Y: number of registers saved in groups of 4
522	 *       M: 4 bit mask of first 16 registers
523	 */
524
525	mov	a2, a6
526	mov	a3, a5
527
5282:	rotw	-1			# a0..a3 become a4..a7
529	addi	a3, a7, -4*4		# next iteration
530	addi	a2, a6, -16		# decrementing Y in WMASK
531	l32i	a4, a3, PT_AREG_END + 0
532	l32i	a5, a3, PT_AREG_END + 4
533	l32i	a6, a3, PT_AREG_END + 8
534	l32i	a7, a3, PT_AREG_END + 12
535	_bgeui	a2, 16, 2b
536
537	/* Clear unrestored registers (don't leak anything to user-land). */
538
5391:	rsr	a0, windowbase
540	rsr	a3, sar
541	sub	a3, a0, a3
542	beqz	a3, 2f
543	extui	a3, a3, 0, WBBITS
544
5451:	rotw	-1
546	addi	a3, a7, -1
547	movi	a4, 0
548	movi	a5, 0
549	movi	a6, 0
550	movi	a7, 0
551	bgei	a3, 1, 1b
552
553	/* We are back where we were when we started.
554	 * Note: a2 still contains WMASK (if we've returned to the original
555	 *	 frame where we had loaded a2), or at least the lower 4 bits
556	 *	 (if we have restored WSBITS-1 frames).
557	 */
558
559#if XCHAL_HAVE_THREADPTR
560	l32i	a3, a1, PT_THREADPTR
561	wur	a3, threadptr
562#endif
563
5642:	j	common_exception_exit
565
566	/* This is the kernel exception exit.
567	 * We avoided doing a MOVSP when we entered the exception, but we
568	 * have to do it here.
569	 */
570
571kernel_exception_exit:
572
573#ifdef PREEMPTIBLE_KERNEL
574
575#ifdef CONFIG_PREEMPT
576
577	/*
578	 * Note: We've just returned from a call4, so we have
579	 * at least 4 additional registers.
580	 */
581
582	/* Check current_thread_info->preempt_count */
583
584	GET_THREAD_INFO(a2)
585	l32i	a3, a2, TI_PREEMPT
586	bnez	a3, 1f
587
588	l32i	a2, a2, TI_FLAGS
589
5901:
591
592#endif
593
594#endif
595
596	/* Check if we have to do a movsp.
597	 *
598	 * We only have to do a movsp if the previous window-frame has
599	 * been spilled to the *temporary* exception stack instead of the
600	 * task's stack. This is the case if the corresponding bit in
601	 * WINDOWSTART for the previous window-frame was set before
602	 * (not spilled) but is zero now (spilled).
603	 * If this bit is zero, all other bits except the one for the
604	 * current window frame are also zero. So, we can use a simple test:
605	 * 'and' WINDOWSTART and WINDOWSTART-1:
606	 *
607	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
608	 *
609	 * The result is zero only if one bit was set.
610	 *
611	 * (Note: We might have gone through several task switches before
612	 *        we come back to the current task, so WINDOWBASE might be
613	 *        different from the time the exception occurred.)
614	 */
615
616	/* Test WINDOWSTART before and after the exception.
617	 * We actually have WMASK, so we only have to test if it is 1 or not.
618	 */
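
	/* In C, the "at most one bit set" test used below reads:
	 *
	 *	static int single_frame(unsigned int ws)
	 *	{
	 *		return (ws & (ws - 1)) == 0;	// true for 0 or 1 bit
	 *	}
	 */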
619
620	l32i	a2, a1, PT_WMASK
621	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump
622
623	/* Test WINDOWSTART now. If spilled, do the movsp */
624
625	rsr     a3, windowstart
626	addi	a0, a3, -1
627	and     a3, a3, a0
628	_bnez	a3, common_exception_exit
629
630	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */
631
632	addi    a0, a1, -16
633	l32i    a3, a0, 0
634	l32i    a4, a0, 4
635	s32i    a3, a1, PT_SIZE+0
636	s32i    a4, a1, PT_SIZE+4
637	l32i    a3, a0, 8
638	l32i    a4, a0, 12
639	s32i    a3, a1, PT_SIZE+8
640	s32i    a4, a1, PT_SIZE+12
641
642	/* Common exception exit.
643	 * We restore the special registers and the current window frame, and
644	 * return from the exception.
645	 *
646	 * Note: We expect a2 to hold PT_WMASK
647	 */
648
649common_exception_exit:
650
651	/* Restore address registers. */
652
653	_bbsi.l	a2, 1, 1f
654	l32i	a4,  a1, PT_AREG4
655	l32i	a5,  a1, PT_AREG5
656	l32i	a6,  a1, PT_AREG6
657	l32i	a7,  a1, PT_AREG7
658	_bbsi.l	a2, 2, 1f
659	l32i	a8,  a1, PT_AREG8
660	l32i	a9,  a1, PT_AREG9
661	l32i	a10, a1, PT_AREG10
662	l32i	a11, a1, PT_AREG11
663	_bbsi.l	a2, 3, 1f
664	l32i	a12, a1, PT_AREG12
665	l32i	a13, a1, PT_AREG13
666	l32i	a14, a1, PT_AREG14
667	l32i	a15, a1, PT_AREG15
668
669	/* Restore PC, SAR */
670
6711:	l32i	a2, a1, PT_PC
672	l32i	a3, a1, PT_SAR
673	wsr	a2, epc1
674	wsr	a3, sar
675
676	/* Restore LBEG, LEND, LCOUNT */
677
678	l32i	a2, a1, PT_LBEG
679	l32i	a3, a1, PT_LEND
680	wsr	a2, lbeg
681	l32i	a2, a1, PT_LCOUNT
682	wsr	a3, lend
683	wsr	a2, lcount
684
685	/* We control single stepping through the ICOUNTLEVEL register. */
686
687	l32i	a2, a1, PT_ICOUNTLEVEL
688	movi	a3, -2
689	wsr	a2, icountlevel
690	wsr	a3, icount
691
692	/* Check if it was a double exception. */
693
694	l32i	a0, a1, PT_DEPC
695	l32i	a3, a1, PT_AREG3
696	l32i	a2, a1, PT_AREG2
697	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
698
699	/* Restore a0...a3 and return */
700
701	l32i	a0, a1, PT_AREG0
702	l32i	a1, a1, PT_AREG1
703	rfe
704
7051: 	wsr	a0, depc
706	l32i	a0, a1, PT_AREG0
707	l32i	a1, a1, PT_AREG1
708	rfde
709
710ENDPROC(kernel_exception)
711
712/*
713 * Debug exception handler.
714 *
715 * Currently, we don't support KGDB, so only user applications can be debugged.
716 *
717 * When we get here, a0 is trashed and saved to excsave[debuglevel].
718 */
719
720ENTRY(debug_exception)
721
722	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
723	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
724
725	/* Set EPC1 and EXCCAUSE */
726
727	wsr	a2, depc		# save a2 temporarily
728	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
729	wsr	a2, epc1
730
731	movi	a2, EXCCAUSE_MAPPED_DEBUG
732	wsr	a2, exccause
733
734	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
735
736	movi	a2, 1 << PS_EXCM_BIT
737	or	a2, a0, a2
738	movi	a0, debug_exception	# restore a3, debug jump vector
739	wsr	a2, ps
740	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
741
742	/* Switch to kernel/user stack, restore jump vector, and save a0 */
743
744	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
745
746	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
747	s32i	a0, a2, PT_AREG0
748	movi	a0, 0
749	s32i	a1, a2, PT_AREG1
750	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
751	xsr	a0, depc
752	s32i	a3, a2, PT_AREG3
753	s32i	a0, a2, PT_AREG2
754	mov	a1, a2
755	j	_kernel_exception
756
7572:	rsr	a2, excsave1
758	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
759	s32i	a0, a2, PT_AREG0
760	movi	a0, 0
761	s32i	a1, a2, PT_AREG1
762	s32i	a0, a2, PT_DEPC
763	xsr	a0, depc
764	s32i	a3, a2, PT_AREG3
765	s32i	a0, a2, PT_AREG2
766	mov	a1, a2
767	j	_user_exception
768
769	/* Debug exception while in exception mode. */
7701:	j	1b	// FIXME!!
771
772ENDPROC(debug_exception)
773
774/*
775 * We get here in case of an unrecoverable exception.
776 * The only thing we can do is to be nice and print a panic message.
777 * We only produce a single stack frame for panic, so ???
778 *
779 *
780 * Entry conditions:
781 *
782 *   - a0 contains the caller address; original value saved in excsave1.
783 *   - the original a0 contains a valid return address (backtrace) or 0.
784 *   - a2 contains a valid stackpointer
785 *
786 * Notes:
787 *
788 *   - If the stack pointer could be invalid, the caller has to setup a
789 *     dummy stack pointer (e.g. the stack of the init_task)
790 *
791 *   - If the return address could be invalid, the caller has to set it
792 *     to 0, so the backtrace would stop.
793 *
794 */
795	.align 4
796unrecoverable_text:
797	.ascii "Unrecoverable error in exception handler\0"
798
799ENTRY(unrecoverable_exception)
800
801	movi	a0, 1
802	movi	a1, 0
803
804	wsr	a0, windowstart
805	wsr	a1, windowbase
806	rsync
807
808	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
809	wsr	a1, ps
810	rsync
811
812	movi	a1, init_task
813	movi	a0, 0
814	addi	a1, a1, PT_REGS_OFFSET
815
816	movi	a4, panic
817	movi	a6, unrecoverable_text
818
819	callx4	a4
820
8211:	j	1b
822
823ENDPROC(unrecoverable_exception)
824
825/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
826
827/*
828 * Fast-handler for alloca exceptions
829 *
830 *  The ALLOCA handler is entered when user code executes the MOVSP
831 *  instruction and the caller's frame is not in the register file.
832 *  In this case, the caller frame's a0..a3 are on the stack just
833 *  below sp (a1), and this handler moves them.
834 *
835 *  For "MOVSP <ar>,<as>" with a destination other than a1, this routine
836 *  simply moves the value from <as> to <ar> without moving the save area.
837 *
838 * Entry condition:
839 *
840 *   a0:	trashed, original value saved on stack (PT_AREG0)
841 *   a1:	a1
842 *   a2:	new stack pointer, original in DEPC
843 *   a3:	dispatch table
844 *   depc:	a2, original value saved on stack (PT_DEPC)
845 *   excsave_1:	a3
846 *
847 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
848 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
849 */
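
/* A hedged C model (names are illustrative, not kernel API) of the
 * "movsp a1, <as>" case: the four-word save area just below the old stack
 * pointer is copied to just below the new one:
 *
 *	static void movsp_a1(unsigned int **spp, unsigned int *new_sp)
 *	{
 *		unsigned int *old_sp = *spp;
 *		int i;
 *
 *		for (i = 1; i <= 4; i++)	// caller's a0..a3 save area
 *			new_sp[-i] = old_sp[-i];
 *		*spp = new_sp;
 *	}
 */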
850
851#if XCHAL_HAVE_BE
852#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
853#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
854#else
855#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
856#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
857#endif
858
859ENTRY(fast_alloca)
860
861	/* We shouldn't be in a double exception. */
862
863	l32i	a0, a2, PT_DEPC
864	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
865
866	rsr	a0, depc		# get a2
867	s32i	a4, a2, PT_AREG4	# save a4 and
868	s32i	a0, a2, PT_AREG2	# a2 to stack
869
870	/* Exit critical section. */
871
872	movi	a0, 0
873	s32i	a0, a3, EXC_TABLE_FIXUP
874
875	/* Restore a3, excsave_1 */
876
877	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
878	rsr	a4, epc1		# get exception address
879	s32i	a3, a2, PT_AREG3	# save a3 to stack
880
881#ifdef ALLOCA_EXCEPTION_IN_IRAM
882#error	iram not supported
883#else
884	/* Note: l8ui not allowed in IRAM/IROM!! */
885	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
886#endif
887	movi	a3, .Lmovsp_src
888	_EXTUI_MOVSP_SRC(a0)		# extract source register number
889	addx8	a3, a0, a3
890	jx	a3
891
892.Lunhandled_double:
893	wsr	a0, excsave1
894	movi	a0, unrecoverable_exception
895	callx0	a0
896
897	.align 8
898.Lmovsp_src:
899	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
900	mov	a3, a1;			_j 1f;	.align 8
901	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
902	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
903	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
904	mov	a3, a5;			_j 1f;	.align 8
905	mov	a3, a6;			_j 1f;	.align 8
906	mov	a3, a7;			_j 1f;	.align 8
907	mov	a3, a8;			_j 1f;	.align 8
908	mov	a3, a9;			_j 1f;	.align 8
909	mov	a3, a10;		_j 1f;	.align 8
910	mov	a3, a11;		_j 1f;	.align 8
911	mov	a3, a12;		_j 1f;	.align 8
912	mov	a3, a13;		_j 1f;	.align 8
913	mov	a3, a14;		_j 1f;	.align 8
914	mov	a3, a15;		_j 1f;	.align 8
915
9161:
917
918#ifdef ALLOCA_EXCEPTION_IN_IRAM
919#error	iram not supported
920#else
921	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
922#endif
923	addi	a4, a4, 3		# step over movsp
924	_EXTUI_MOVSP_DST(a0)		# extract destination register
925	wsr	a4, epc1		# save new epc_1
926
927	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump
928
929	/* Move the save area. This implies the use of the L32E
930	 * and S32E instructions, because this move must be done with
931	 * the user's PS.RING privilege levels, not with ring 0
932	 * (kernel's) privileges currently active with PS.EXCM
933	 * set. Note that we have still registered a fixup routine with the
934	 * double exception vector in case a double exception occurs.
935	 */
936
937	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
938
939	l32e	a0, a1, -16
940	l32e	a4, a1, -12
941	s32e	a0, a3, -16
942	s32e	a4, a3, -12
943	l32e	a0, a1, -8
944	l32e	a4, a1, -4
945	s32e	a0, a3, -8
946	s32e	a4, a3, -4
947
948	/* Restore stack-pointer and all the other saved registers. */
949
950	mov	a1, a3
951
952	l32i	a4, a2, PT_AREG4
953	l32i	a3, a2, PT_AREG3
954	l32i	a0, a2, PT_AREG0
955	l32i	a2, a2, PT_AREG2
956	rfe
957
958	/*  MOVSP <at>,<as>  was invoked with <at> != a1.
959	 *  Because the stack pointer is not being modified,
960	 *  we should be able to just modify the pointer
961	 *  without moving any save area.
962	 *  The processor only traps these occurrences if the
963	 *  caller window isn't live, so unfortunately we can't
964	 *  use this as an alternate trap mechanism.
965	 *  So we just do the move.  This requires that we
966	 *  resolve the destination register, not just the source,
967	 *  so there's some extra work.
968	 *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
969	 */
970
971	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
972
9731:	movi	a4, .Lmovsp_dst
974	addx8	a4, a0, a4
975	jx	a4
976
977	.align 8
978.Lmovsp_dst:
979	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
980	mov	a1, a3;			_j 1f;	.align 8
981	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
982	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
983	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
984	mov	a5, a3;			_j 1f;	.align 8
985	mov	a6, a3;			_j 1f;	.align 8
986	mov	a7, a3;			_j 1f;	.align 8
987	mov	a8, a3;			_j 1f;	.align 8
988	mov	a9, a3;			_j 1f;	.align 8
989	mov	a10, a3;		_j 1f;	.align 8
990	mov	a11, a3;		_j 1f;	.align 8
991	mov	a12, a3;		_j 1f;	.align 8
992	mov	a13, a3;		_j 1f;	.align 8
993	mov	a14, a3;		_j 1f;	.align 8
994	mov	a15, a3;		_j 1f;	.align 8
995
9961:	l32i	a4, a2, PT_AREG4
997	l32i	a3, a2, PT_AREG3
998	l32i	a0, a2, PT_AREG0
999	l32i	a2, a2, PT_AREG2
1000	rfe
1001
1002ENDPROC(fast_alloca)
1003
1004/*
1005 * fast system calls.
1006 *
1007 * WARNING:  The kernel doesn't save the entire user context before
1008 * handling a fast system call.  These functions are small and short,
1009 * usually offering some functionality not available to user tasks.
1010 *
1011 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
1012 *
1013 * Entry condition:
1014 *
1015 *   a0:	trashed, original value saved on stack (PT_AREG0)
1016 *   a1:	a1
1017 *   a2:	new stack pointer, original in DEPC
1018 *   a3:	dispatch table
1019 *   depc:	a2, original value saved on stack (PT_DEPC)
1020 *   excsave_1:	a3
1021 */
1022
1023ENTRY(fast_syscall_kernel)
1024
1025	/* Skip syscall. */
1026
1027	rsr	a0, epc1
1028	addi	a0, a0, 3
1029	wsr	a0, epc1
1030
1031	l32i	a0, a2, PT_DEPC
1032	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1033
1034	rsr	a0, depc			# get syscall-nr
1035	_beqz	a0, fast_syscall_spill_registers
1036	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
1037
1038	j	kernel_exception
1039
1040ENDPROC(fast_syscall_kernel)
1041
1042ENTRY(fast_syscall_user)
1043
1044	/* Skip syscall. */
1045
1046	rsr	a0, epc1
1047	addi	a0, a0, 3
1048	wsr	a0, epc1
1049
1050	l32i	a0, a2, PT_DEPC
1051	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1052
1053	rsr	a0, depc			# get syscall-nr
1054	_beqz	a0, fast_syscall_spill_registers
1055	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
1056
1057	j	user_exception
1058
1059ENDPROC(fast_syscall_user)
1060
1061ENTRY(fast_syscall_unrecoverable)
1062
1063	/* Restore all states. */
1064
1065	l32i    a0, a2, PT_AREG0        # restore a0
1066	xsr     a2, depc                # restore a2, depc
1067	rsr     a3, excsave1
1068
1069	wsr     a0, excsave1
1070	movi    a0, unrecoverable_exception
1071	callx0  a0
1072
1073ENDPROC(fast_syscall_unrecoverable)
1074
1075/*
1076 * sysxtensa syscall handler
1077 *
1078 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
1079 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
1080 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
1081 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1082 *        a2            a6                   a3    a4      a5
1083 *
1084 * Entry condition:
1085 *
1086 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
1087 *   a1:	a1
1088 *   a2:	new stack pointer, original in a0 and DEPC
1089 *   a3:	dispatch table, original in excsave_1
1090 *   a4..a15:	unchanged
1091 *   depc:	a2, original value saved on stack (PT_DEPC)
1092 *   excsave_1:	a3
1093 *
1094 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1095 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1096 *
1097 * Note: we don't have to save a2; a2 holds the return value
1098 *
1099 * We use the two macros TRY and CATCH:
1100 *
1101 * TRY	 adds an entry to the __ex_table fixup table for the immediately
1102 *	 following instruction.
1103 *
1104 * CATCH catches any exception that occurred at one of the preceding TRY
1105 *       statements and continues from there
1106 *
1107 * Usage TRY	l32i	a0, a1, 0
1108 *		<other code>
1109 *	 done:	rfe
1110 *	 CATCH	<set return code>
1111 *		j done
1112 */
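
/* Functionally, the handler below implements this C sketch (error paths
 * simplified; for CMP_SWP, 'val' is the expected old value):
 *
 *	int sysxtensa(int op, unsigned int *ptr, unsigned int val,
 *		      unsigned int newval)
 *	{
 *		unsigned int old;
 *
 *		switch (op) {
 *		case SYS_XTENSA_ATOMIC_CMP_SWP:
 *			if (*ptr != val)
 *				return 0;
 *			*ptr = newval;
 *			return 1;
 *		case SYS_XTENSA_ATOMIC_SET:
 *			old = *ptr;
 *			*ptr = val;
 *			return old;
 *		case SYS_XTENSA_ATOMIC_ADD:
 *		case SYS_XTENSA_ATOMIC_EXG_ADD:
 *			old = *ptr;
 *			*ptr = old + val;
 *			return old;
 *		}
 *		return -EINVAL;
 *	}
 */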
1113
1114#define TRY								\
1115	.section __ex_table, "a";					\
1116	.word	66f, 67f;						\
1117	.text;								\
111866:
1119
1120#define CATCH								\
112167:
1122
1123ENTRY(fast_syscall_xtensa)
1124
1125	xsr	a3, excsave1		# restore a3, excsave1
1126
1127	s32i	a7, a2, PT_AREG7	# we need an additional register
1128	movi	a7, 4			# sizeof(unsigned int)
1129	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
1130
1131	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
1132	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
1133	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
1134
1135	/* Fall through for ATOMIC_CMP_SWP. */
1136
1137.Lswp:	/* Atomic compare and swap */
1138
1139TRY	l32i	a0, a3, 0		# read old value
1140	bne	a0, a4, 1f		# same as old value? jump
1141TRY	s32i	a5, a3, 0		# different, modify value
1142	l32i	a7, a2, PT_AREG7	# restore a7
1143	l32i	a0, a2, PT_AREG0	# restore a0
1144	movi	a2, 1			# and return 1
1145	addi	a6, a6, 1		# restore a6 (really necessary?)
1146	rfe
1147
11481:	l32i	a7, a2, PT_AREG7	# restore a7
1149	l32i	a0, a2, PT_AREG0	# restore a0
1150	movi	a2, 0			# return 0 (note that we cannot set
1151	addi	a6, a6, 1		# restore a6 (really necessary?)
1152	rfe
1153
1154.Lnswp:	/* Atomic set, add, and exg_add. */
1155
1156TRY	l32i	a7, a3, 0		# orig
1157	add	a0, a4, a7		# + arg
1158	moveqz	a0, a4, a6		# set
1159TRY	s32i	a0, a3, 0		# write new value
1160
1161	mov	a0, a2
1162	mov	a2, a7
1163	l32i	a7, a0, PT_AREG7	# restore a7
1164	l32i	a0, a0, PT_AREG0	# restore a0
1165	addi	a6, a6, 1		# restore a6 (really necessary?)
1166	rfe
1167
1168CATCH
1169.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
1170	l32i	a0, a2, PT_AREG0	# restore a0
1171	movi	a2, -EFAULT
1172	rfe
1173
1174.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
1175	l32i	a0, a2, PT_AREG0	# restore a0
1176	movi	a2, -EINVAL
1177	rfe
1178
1179ENDPROC(fast_syscall_xtensa)
1180
1181
1182/* fast_syscall_spill_registers.
1183 *
1184 * Entry condition:
1185 *
1186 *   a0:	trashed, original value saved on stack (PT_AREG0)
1187 *   a1:	a1
1188 *   a2:	new stack pointer, original in DEPC
1189 *   a3:	dispatch table
1190 *   depc:	a2, original value saved on stack (PT_DEPC)
1191 *   excsave_1:	a3
1192 *
1193 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
1194 */
1195
1196ENTRY(fast_syscall_spill_registers)
1197
1198	/* Register a FIXUP handler (pass current wb as a parameter) */
1199
1200	movi	a0, fast_syscall_spill_registers_fixup
1201	s32i	a0, a3, EXC_TABLE_FIXUP
1202	rsr	a0, windowbase
1203	s32i	a0, a3, EXC_TABLE_PARAM
1204
1205	/* Save a3 and SAR on stack. */
1206
1207	rsr	a0, sar
1208	xsr	a3, excsave1		# restore a3 and excsave_1
1209	s32i	a3, a2, PT_AREG3
1210	s32i	a4, a2, PT_AREG4
1211	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
1212
1213	/* The spill routine might clobber a7, a11, and a15. */
1214
1215	s32i	a7, a2, PT_AREG7
1216	s32i	a11, a2, PT_AREG11
1217	s32i	a15, a2, PT_AREG15
1218
1219	call0	_spill_registers	# destroys a3, a4, and SAR
1220
1221	/* Advance PC, restore registers and SAR, and return from exception. */
1222
1223	l32i	a3, a2, PT_AREG5
1224	l32i	a4, a2, PT_AREG4
1225	l32i	a0, a2, PT_AREG0
1226	wsr	a3, sar
1227	l32i	a3, a2, PT_AREG3
1228
1229	/* Restore clobbered registers. */
1230
1231	l32i	a7, a2, PT_AREG7
1232	l32i	a11, a2, PT_AREG11
1233	l32i	a15, a2, PT_AREG15
1234
1235	movi	a2, 0
1236	rfe
1237
1238ENDPROC(fast_syscall_spill_registers)
1239
1240/* Fixup handler.
1241 *
1242 * We get here if the spill routine causes an exception, e.g. tlb miss.
1243 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1244 * we entered the spill routine and jump to the user exception handler.
1245 *
1246 * a0: value of depc, original value in depc
1247 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1248 * a3: exctable, original value in excsave1
1249 */
1250
1251fast_syscall_spill_registers_fixup:
1252
1253	rsr	a2, windowbase	# get current windowbase (a2 is saved)
1254	xsr	a0, depc	# restore depc and a0
1255	ssl	a2		# set shift (32 - WB)
1256
1257	/* We need to make sure the current registers (a0-a3) are preserved.
1258	 * To do this, we simply set the bit for the current window frame
1259	 * in WS, so that the exception handlers save them to the task stack.
1260	 */
1261
1262	rsr	a3, excsave1	# get spill-mask
1263	slli	a2, a3, 1	# shift left by one
1264
1265	slli	a3, a2, 32-WSBITS
1266	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
1267	wsr	a2, windowstart	# set corrected windowstart
1268
1269	movi	a3, exc_table
1270	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
1271	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)
1272
1273	/* Return to the original (user task) WINDOWBASE.
1274	 * We leave the following frame behind:
1275	 * a0, a1, a2	same
1276	 * a3:		trashed (saved in excsave_1)
1277	 * depc:	depc (we have to return to that address)
1278	 * excsave_1:	a3
1279	 */
1280
1281	wsr	a3, windowbase
1282	rsync
1283
1284	/* We are now in the original frame when we entered _spill_registers:
1285	 *  a0: return address
1286	 *  a1: used, stack pointer
1287	 *  a2: kernel stack pointer
1288	 *  a3: available, saved in EXCSAVE_1
1289	 *  depc: exception address
1290	 *  excsave: a3
1291	 * Note: This frame might be the same as above.
1292	 */
1293
1294	/* Setup stack pointer. */
1295
1296	addi	a2, a2, -PT_USER_SIZE
1297	s32i	a0, a2, PT_AREG0
1298
1299	/* Make sure we return to this fixup handler. */
1300
1301	movi	a3, fast_syscall_spill_registers_fixup_return
1302	s32i	a3, a2, PT_DEPC		# setup depc
1303
1304	/* Jump to the exception handler. */
1305
1306	movi	a3, exc_table
1307	rsr	a0, exccause
1308	addx4	a0, a0, a3              	# find entry in table
1309	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
1310	jx	a0
1311
1312fast_syscall_spill_registers_fixup_return:
1313
1314	/* When we return here, all registers have been restored (a2: DEPC) */
1315
1316	wsr	a2, depc		# exception address
1317
1318	/* Restore fixup handler. */
1319
1320	xsr	a3, excsave1
1321	movi	a2, fast_syscall_spill_registers_fixup
1322	s32i	a2, a3, EXC_TABLE_FIXUP
1323	rsr	a2, windowbase
1324	s32i	a2, a3, EXC_TABLE_PARAM
1325	l32i	a2, a3, EXC_TABLE_KSTK
1326
1327	/* Load WB at the time the exception occurred. */
1328
1329	rsr	a3, sar			# WB is still in SAR
1330	neg	a3, a3
1331	wsr	a3, windowbase
1332	rsync
1333
1334	/* Restore a3 and return. */
1335
1336	movi	a3, exc_table
1337	xsr	a3, excsave1
1338
1339	rfde
1340
1341
1342/*
1343 * spill all registers.
1344 *
1345 * This is not a real function. The following conditions must be met:
1346 *
1347 *  - must be called with call0.
1348 *  - uses a3, a4 and SAR.
1349 *  - the last 'valid' register of each frame is clobbered.
1350 *  - the caller must have registered a fixup handler
1351 *    (or be inside a critical section)
1352 *  - PS_EXCM must be set (PS_WOE cleared?)
1353 */
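
/* A hedged C model of the WINDOWSTART bookkeeping below (reusing the
 * ffs_ws sketch from above): once WS has been rotated so the current
 * frame sits at bit 0, the number of empty frames to rotate past to
 * reach the oldest live frame is the trailing-zero count:
 *
 *	static int skip_empty_frames(unsigned int ws_rot)
 *	{
 *		unsigned int oldest = ws_rot & -ws_rot;	// lowest set bit
 *
 *		return WSBITS - ffs_ws(oldest);		// trailing zeros
 *	}
 */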
1354
1355ENTRY(_spill_registers)
1356
1357	/*
1358	 * Rotate ws so that the current windowbase is at bit 0.
1359	 * Assume ws = xxxwww1yy (www1 current window frame).
1360	 * Rotate ws right so that a4 = yyxxxwww1.
1361	 */
1362
1363	rsr	a4, windowbase
1364	rsr	a3, windowstart		# a3 = xxxwww1yy
1365	ssr	a4			# holds WB
1366	slli	a4, a3, WSBITS
1367	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
1368	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
1369
1370	/* We are done if no frame other than the current one is live. */
1371
1372	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
1373	movi	a4, (1 << (WSBITS-1))
1374	_beqz	a3, .Lnospill		# only one active frame? jump
1375
1376	/* We want 1 at the top, so that we return to the current windowbase */
1377
1378	or	a3, a3, a4		# 1yyxxxwww
1379
1380	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1381
1382	wsr	a3, windowstart		# save shifted windowstart
1383	neg	a4, a3
1384	and	a3, a4, a3		# first bit set from right: 000010000
1385
1386	ffs_ws	a4, a3			# a4: shifts to skip empty frames
1387	movi	a3, WSBITS
1388	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
1389	ssr	a4			# save in SAR for later.
1390
1391	rsr	a3, windowbase
1392	add	a3, a3, a4
1393	wsr	a3, windowbase
1394	rsync
1395
1396	rsr	a3, windowstart
1397	srl	a3, a3			# shift windowstart
1398
1399	/* WB is now just one frame below the oldest frame in the register
1400	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
1401	   and WS differ by one 4-register frame. */
1402
1403	/* Save frames. Depending on what call was used (call4, call8, call12),
1404	 * we have to save 4, 8, or 12 registers.
1405	 */
1406
1407	_bbsi.l	a3, 1, .Lc4
1408	_bbsi.l	a3, 2, .Lc8
1409
1410	/* Special case: we have a call12-frame starting at a4. */
1411
1412	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)
1413
1414	s32e	a4, a1, -16	# a1 is valid with an empty spill area
1415	l32e	a4, a5, -12
1416	s32e	a8, a4, -48
1417	mov	a8, a4
1418	l32e	a4, a1, -16
1419	j	.Lc12c
1420
1421.Lnospill:
1422	ret
1423
1424.Lloop: _bbsi.l	a3, 1, .Lc4
1425	_bbci.l	a3, 2, .Lc12
1426
1427.Lc8:	s32e	a4, a13, -16
1428	l32e	a4, a5, -12
1429	s32e	a8, a4, -32
1430	s32e	a5, a13, -12
1431	s32e	a6, a13, -8
1432	s32e	a7, a13, -4
1433	s32e	a9, a4, -28
1434	s32e	a10, a4, -24
1435	s32e	a11, a4, -20
1436
1437	srli	a11, a3, 2		# shift windowstart by 2
1438	rotw	2
1439	_bnei	a3, 1, .Lloop
1440
1441.Lexit: /* Done. Do the final rotation, set WS, and return. */
1442
1443	rotw	1
1444	rsr	a3, windowbase
1445	ssl	a3
1446	movi	a3, 1
1447	sll	a3, a3
1448	wsr	a3, windowstart
1449	ret
1450
1451.Lc4:	s32e	a4, a9, -16
1452	s32e	a5, a9, -12
1453	s32e	a6, a9, -8
1454	s32e	a7, a9, -4
1455
1456	srli	a7, a3, 1
1457	rotw	1
1458	_bnei	a3, 1, .Lloop
1459	j	.Lexit
1460
1461.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
1462
1463	/* 12-register frame (call12) */
1464
1465	l32e	a2, a5, -12
1466	s32e	a8, a2, -48
1467	mov	a8, a2
1468
1469.Lc12c: s32e	a9, a8, -44
1470	s32e	a10, a8, -40
1471	s32e	a11, a8, -36
1472	s32e	a12, a8, -32
1473	s32e	a13, a8, -28
1474	s32e	a14, a8, -24
1475	s32e	a15, a8, -20
1476	srli	a15, a3, 3
1477
1478	/* The stack pointer for a4..a7 is out of reach, so we rotate the
1479	 * window, grab the stackpointer, and rotate back.
1480	 * Alternatively, we could also use the following approach, but that
1481	 * makes the fixup routine much more complicated:
1482	 * rotw	1
1483	 * s32e	a0, a13, -16
1484	 * ...
1485	 * rotw 2
1486	 */
1487
1488	rotw	1
1489	mov	a5, a13
1490	rotw	-1
1491
1492	s32e	a4, a9, -16
1493	s32e	a5, a9, -12
1494	s32e	a6, a9, -8
1495	s32e	a7, a9, -4
1496
1497	rotw	3
1498
1499	_beqi	a3, 1, .Lexit
1500	j	.Lloop
1501
1502.Linvalid_mask:
1503
1504	/* We get here because of an unrecoverable error in the window
1505	 * registers. If we are in user space, we kill the application;
1506	 * in kernel space, however, this condition is unrecoverable.
1507	 */
1508
1509	rsr	a0, ps
1510	_bbci.l	a0, PS_UM_BIT, 1f
1511
1512	/* User space: Setup a dummy frame and kill application.
1513	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1514	 */
1515
1516	movi	a0, 1
1517	movi	a1, 0
1518
1519	wsr	a0, windowstart
1520	wsr	a1, windowbase
1521	rsync
1522
1523	movi	a0, 0
1524
1525	movi	a3, exc_table
1526	l32i	a1, a3, EXC_TABLE_KSTK
1527	wsr	a3, excsave1
1528
1529	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
1530	wsr	a4, ps
1531	rsync
1532
1533	movi	a6, SIGSEGV
1534	movi	a4, do_exit
1535	callx4	a4
1536
15371:	/* Kernel space: PANIC! */
1538
1539	wsr	a0, excsave1
1540	movi	a0, unrecoverable_exception
1541	callx0	a0		# should not return
15421:	j	1b
1543
1544ENDPROC(_spill_registers)
1545
1546#ifdef CONFIG_MMU
1547/*
1548 * We should never get here. Bail out!
1549 */
1550
1551ENTRY(fast_second_level_miss_double_kernel)
1552
15531:	movi	a0, unrecoverable_exception
1554	callx0	a0		# should not return
15551:	j	1b
1556
1557ENDPROC(fast_second_level_miss_double_kernel)
1558
1559/* First-level entry handler for user, kernel, and double 2nd-level
1560 * TLB miss exceptions.  Note that for now, user and kernel miss
1561 * exceptions share the same entry point and are handled identically.
1562 *
1563 * An old, less-efficient C version of this function used to exist.
1564 * We include it below, interleaved as comments, for reference.
1565 *
1566 * Entry condition:
1567 *
1568 *   a0:	trashed, original value saved on stack (PT_AREG0)
1569 *   a1:	a1
1570 *   a2:	new stack pointer, original in DEPC
1571 *   a3:	dispatch table
1572 *   depc:	a2, original value saved on stack (PT_DEPC)
1573 *   excsave_1:	a3
1574 *
1575 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1576 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1577 */
1578
1579ENTRY(fast_second_level_miss)
1580
1581	/* Save a1. Note: we don't expect a double exception. */
1582
1583	s32i	a1, a2, PT_AREG1
1584
1585	/* We need to map the page of PTEs for the user task.  Find
1586	 * the pointer to that page.  Also, it's possible for tsk->mm
1587	 * to be NULL while tsk->active_mm is nonzero if we faulted on
1588	 * a vmalloc address.  In that rare case, we must use
1589	 * active_mm instead to avoid a fault in this handler.  See
1590	 *
1591	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1592	 *   (or search Internet on "mm vs. active_mm")
1593	 *
1594	 *	if (!mm)
1595	 *		mm = tsk->active_mm;
1596	 *	pgd = pgd_offset (mm, regs->excvaddr);
1597	 *	pmd = pmd_offset (pgd, regs->excvaddr);
1598	 *	pmdval = *pmd;
1599	 */
1600
1601	GET_CURRENT(a1,a2)
1602	l32i	a0, a1, TASK_MM		# tsk->mm
1603	beqz	a0, 9f
1604
1605
1606	/* We deliberately destroy a3 that holds the exception table. */
1607
16088:	rsr	a3, excvaddr		# fault address
1609	_PGD_OFFSET(a0, a3, a1)
1610	l32i	a0, a0, 0		# read pmdval
1611	beqz	a0, 2f
1612
1613	/* Read ptevaddr and convert to top of page-table page.
1614	 *
1615	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
1616	 * 	vpnval += DTLB_WAY_PGTABLE;
1617	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1618	 *	write_dtlb_entry (pteval, vpnval);
1619	 *
1620	 * The messy computation for 'pteval' above really simplifies
1621	 * into the following:
1622	 *
1623	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
1624	 */
1625
1626	movi	a1, (-PAGE_OFFSET) & 0xffffffff
1627	add	a0, a0, a1		# pmdval - PAGE_OFFSET
1628	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
1629	xor	a0, a0, a1
1630
1631	movi	a1, _PAGE_DIRECTORY
1632	or	a0, a0, a1		# ... | PAGE_DIRECTORY
1633
1634	/*
1635	 * We utilize all three wired-ways (7-9) to hold pmd translations.
1636	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
1637	 * This allows mapping the three most common regions to three different
1638	 * DTLBs:
1639	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
1640	 *  2   -> way 8	shared libraries (2000.0000)
1641	 *  3   -> way 9	stack (3000.0000)
1642	 */
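
	/* The way selection below boils down to this C sketch:
	 *
	 *	static unsigned int pgd_dtlb_way(unsigned int vaddr)
	 *	{
	 *		unsigned int r = (vaddr >> 28) & 3;   // bits 28..29
	 *
	 *		return DTLB_WAY_PGD + ((r * 3) >> 2); // +0,+0,+1,+2
	 *	}
	 */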
1643
1644	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
1645	rsr	a1, ptevaddr
1646	addx2	a3, a3, a3		# ->			0,3,6,9
1647	srli	a1, a1, PAGE_SHIFT
1648	extui	a3, a3, 2, 2		# ->			0,0,1,2
1649	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
1650	addi	a3, a3, DTLB_WAY_PGD
1651	add	a1, a1, a3		# ... + way_number
1652
16533:	wdtlb	a0, a1
1654	dsync
1655
1656	/* Exit critical section. */
1657
16584:	movi	a3, exc_table		# restore a3
1659	movi	a0, 0
1660	s32i	a0, a3, EXC_TABLE_FIXUP
1661
1662	/* Restore the working registers, and return. */
1663
1664	l32i	a0, a2, PT_AREG0
1665	l32i	a1, a2, PT_AREG1
1666	l32i	a2, a2, PT_DEPC
1667	xsr	a3, excsave1
1668
1669	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1670
1671	/* Restore excsave1 and return. */
1672
1673	rsr	a2, depc
1674	rfe
1675
1676	/* Return from double exception. */
1677
16781:	xsr	a2, depc
1679	esync
1680	rfde
1681
16829:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1683	j	8b
1684
1685#if (DCACHE_WAY_SIZE > PAGE_SIZE)
1686
16872:	/* Special case for cache aliasing.
1688	 * We (should) only get here if a clear_user_page, copy_user_page
1689	 * or the aliased cache flush functions got preemptively interrupted
1690	 * by another task. Re-establish temporary mapping to the
1691	 * TLBTEMP_BASE areas.
1692	 */
1693
1694	/* We shouldn't be in a double exception */
1695
1696	l32i	a0, a2, PT_DEPC
1697	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
1698
1699	/* Make sure the exception originated in the special functions */
1700
1701	movi	a0, __tlbtemp_mapping_start
1702	rsr	a3, epc1
1703	bltu	a3, a0, 2f
1704	movi	a0, __tlbtemp_mapping_end
1705	bgeu	a3, a0, 2f
1706
1707	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1708
1709	movi	a3, TLBTEMP_BASE_1
1710	rsr	a0, excvaddr
1711	bltu	a0, a3, 2f
1712
1713	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
1714	bgeu	a1, a3, 2f
1715
1716	/* Check if we have to restore an ITLB mapping. */
1717
1718	movi	a1, __tlbtemp_mapping_itlb
1719	rsr	a3, epc1
1720	sub	a3, a3, a1
1721
1722	/* Calculate VPN */
1723
1724	movi	a1, PAGE_MASK
1725	and	a1, a1, a0
1726
1727	/* Jump for ITLB entry */
1728
1729	bgez	a3, 1f
1730
1731	/* We can use up to two TLBTEMP areas, one for src and one for dst. */
1732
1733	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1734	add	a1, a3, a1
1735
1736	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
1737
1738	mov	a0, a6
1739	movnez	a0, a7, a3
1740	j	3b
1741
1742	/* ITLB entry. We only use dst in a6. */
1743
17441:	witlb	a6, a1
1745	isync
1746	j	4b
1747
1748
1749#endif	// DCACHE_WAY_SIZE > PAGE_SIZE
1750
1751
17522:	/* Invalid PGD, default exception handling */
1753
1754	movi	a3, exc_table
1755	rsr	a1, depc
1756	xsr	a3, excsave1
1757	s32i	a1, a2, PT_AREG2
1758	s32i	a3, a2, PT_AREG3
1759	mov	a1, a2
1760
1761	rsr	a2, ps
1762	bbsi.l	a2, PS_UM_BIT, 1f
1763	j	_kernel_exception
17641:	j	_user_exception
1765
1766ENDPROC(fast_second_level_miss)
1767
1768/*
1769 * StoreProhibitedException
1770 *
1771 * Update the pte and invalidate the itlb mapping for this pte.
1772 *
1773 * Entry condition:
1774 *
1775 *   a0:	trashed, original value saved on stack (PT_AREG0)
1776 *   a1:	a1
1777 *   a2:	new stack pointer, original in DEPC
1778 *   a3:	dispatch table
1779 *   depc:	a2, original value saved on stack (PT_DEPC)
1780 *   excsave_1:	a3
1781 *
1782 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1783 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1784 */
1785
1786ENTRY(fast_store_prohibited)
1787
1788	/* Save a1 and a4. */
1789
1790	s32i	a1, a2, PT_AREG1
1791	s32i	a4, a2, PT_AREG4
1792
1793	GET_CURRENT(a1,a2)
1794	l32i	a0, a1, TASK_MM		# tsk->mm
1795	beqz	a0, 9f
1796
17978:	rsr	a1, excvaddr		# fault address
1798	_PGD_OFFSET(a0, a1, a4)
1799	l32i	a0, a0, 0
1800	beqz	a0, 2f
1801
1802	/*
1803	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
1804	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
1805	 */
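
	/* As a C sketch, the fast path below only fixes up PTEs that are
	 * present and writable; anything else is punted to the C fault
	 * handler:
	 *
	 *	static int fixup_writable(unsigned int *ptep)
	 *	{
	 *		unsigned int pte = *ptep;
	 *
	 *		if ((pte & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
	 *		    !(pte & (1 << _PAGE_WRITABLE_BIT)))
	 *			return 0;	// punt to the C fault handler
	 *		*ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY |
	 *			_PAGE_HW_WRITE;
	 *		return 1;
	 *	}
	 */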
1806
1807	_PTE_OFFSET(a0, a1, a4)
1808	l32i	a4, a0, 0		# read pteval
1809	movi	a1, _PAGE_CA_INVALID
1810	ball	a4, a1, 2f
1811	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f
1812
1813	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
1814	or	a4, a4, a1
1815	rsr	a1, excvaddr
1816	s32i	a4, a0, 0
1817
1818	/* We need to flush the cache if we have page coloring. */
1819#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1820	dhwb	a0, 0
1821#endif
1822	pdtlb	a0, a1
1823	wdtlb	a4, a0
1824
1825	/* Exit critical section. */
1826
1827	movi	a0, 0
1828	s32i	a0, a3, EXC_TABLE_FIXUP
1829
1830	/* Restore the working registers, and return. */
1831
1832	l32i	a4, a2, PT_AREG4
1833	l32i	a1, a2, PT_AREG1
1834	l32i	a0, a2, PT_AREG0
1835	l32i	a2, a2, PT_DEPC
1836
1837	/* Restore excsave1 and a3. */
1838
1839	xsr	a3, excsave1
1840	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1841
1842	rsr	a2, depc
1843	rfe
1844
1845	/* Double exception. Restore FIXUP handler and return. */
1846
18471:	xsr	a2, depc
1848	esync
1849	rfde
1850
18519:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1852	j	8b
1853
18542:	/* If there was a problem, handle fault in C */
1855
1856	rsr	a4, depc	# still holds a2
1857	xsr	a3, excsave1
1858	s32i	a4, a2, PT_AREG2
1859	s32i	a3, a2, PT_AREG3
1860	l32i	a4, a2, PT_AREG4
1861	mov	a1, a2
1862
1863	rsr	a2, ps
1864	bbsi.l	a2, PS_UM_BIT, 1f
1865	j	_kernel_exception
18661:	j	_user_exception
1867
1868ENDPROC(fast_store_prohibited)
1869
1870#endif /* CONFIG_MMU */
1871
1872/*
1873 * System Calls.
1874 *
1875 * void system_call (struct pt_regs* regs, int exccause)
1876 *                            a2                 a3
1877 */
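
/* Roughly, the assembly below implements this C sketch; callx4 rotates the
 * caller's a6..a11 into the callee's a2..a7, which is how the six argument
 * registers map onto the saved areg[] slots:
 *
 *	unsigned int nr = regs->areg[2];
 *	long ret = -ENOSYS;
 *
 *	regs->syscall = nr;
 *	do_syscall_trace_enter(regs);
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		ret = sys_call_table[nr](regs->areg[6], regs->areg[3],
 *					 regs->areg[4], regs->areg[5],
 *					 regs->areg[8], regs->areg[9]);
 *	regs->areg[2] = ret;
 *	do_syscall_trace_leave(regs);
 */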
1878
1879ENTRY(system_call)
1880
1881	entry	a1, 32
1882
1883	/* regs->syscall = regs->areg[2] */
1884
1885	l32i	a3, a2, PT_AREG2
1886	mov	a6, a2
1887	movi	a4, do_syscall_trace_enter
1888	s32i	a3, a2, PT_SYSCALL
1889	callx4	a4
1890
1891	/* syscall = sys_call_table[syscall_nr] */
1892
1893	movi	a4, sys_call_table;
1894	movi	a5, __NR_syscall_count
1895	movi	a6, -ENOSYS
1896	bgeu	a3, a5, 1f
1897
1898	addx4	a4, a3, a4
1899	l32i	a4, a4, 0
1900	movi	a5, sys_ni_syscall;
1901	beq	a4, a5, 1f
1902
1903	/* Load args: arg0 - arg5 are passed via regs. */
1904
1905	l32i	a6, a2, PT_AREG6
1906	l32i	a7, a2, PT_AREG3
1907	l32i	a8, a2, PT_AREG4
1908	l32i	a9, a2, PT_AREG5
1909	l32i	a10, a2, PT_AREG8
1910	l32i	a11, a2, PT_AREG9
1911
1912	/* Pass one additional argument to the syscall: pt_regs (on stack) */
1913	s32i	a2, a1, 0
1914
1915	callx4	a4
1916
19171:	/* regs->areg[2] = return_value */
1918
1919	s32i	a6, a2, PT_AREG2
1920	movi	a4, do_syscall_trace_leave
1921	mov	a6, a2
1922	callx4	a4
1923	retw
1924
1925ENDPROC(system_call)
1926
1927
1928/*
1929 * Task switch.
1930 *
1931 * struct task*  _switch_to (struct task* prev, struct task* next)
1932 *         a2                              a2                 a3
1933 */
1934
1935ENTRY(_switch_to)
1936
1937	entry	a1, 16
1938
1939	mov	a12, a2			# preserve 'prev' (a2)
1940	mov	a13, a3			# and 'next' (a3)
1941
1942	l32i	a4, a2, TASK_THREAD_INFO
1943	l32i	a5, a3, TASK_THREAD_INFO
1944
1945	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
1946
1947	s32i	a0, a12, THREAD_RA	# save return address
1948	s32i	a1, a12, THREAD_SP	# save stack pointer
1949
1950	/* Disable ints while we manipulate the stack pointer. */
1951
1952	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
1953	xsr	a14, ps
1954	rsr	a3, excsave1
1955	rsync
1956	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
1957
1958	/* Switch CPENABLE */
1959
1960#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
1961	l32i	a3, a5, THREAD_CPENABLE
1962	xsr	a3, cpenable
1963	s32i	a3, a4, THREAD_CPENABLE
1964#endif
1965
1966	/* Flush register file. */
1967
1968	call0	_spill_registers	# destroys a3, a4, and SAR
1969
1970	/* Set kernel stack (and leave critical section)
1971	 * Note: It's safe to set it here. The stack will not be overwritten
1972	 *       because the kernel stack will only be loaded again after
1973	 *       we return from kernel space.
1974	 */
1975
1976	rsr	a3, excsave1		# exc_table
1977	movi	a6, 0
1978	addi	a7, a5, PT_REGS_OFFSET
1979	s32i	a6, a3, EXC_TABLE_FIXUP
1980	s32i	a7, a3, EXC_TABLE_KSTK
1981
1982	/* restore context of the task 'next' */
1983
1984	l32i	a0, a13, THREAD_RA	# restore return address
1985	l32i	a1, a13, THREAD_SP	# restore stack pointer
1986
1987	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
1988
1989	wsr	a14, ps
1990	mov	a2, a12			# return 'prev'
1991	rsync
1992
1993	retw
1994
1995ENDPROC(_switch_to)
1996
1997ENTRY(ret_from_fork)
1998
1999	/* void schedule_tail (struct task_struct *prev)
2000	 * Note: prev is still in a6 (return value from fake call4 frame)
2001	 */
2002	movi	a4, schedule_tail
2003	callx4	a4
2004
2005	movi	a4, do_syscall_trace_leave
2006	mov	a6, a1
2007	callx4	a4
2008
2009	j	common_exception_return
2010
2011ENDPROC(ret_from_fork)
2012
2013/*
2014 * Kernel thread creation helper
2015 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
2016 *           left from _switch_to: a6 = prev
2017 */
2018ENTRY(ret_from_kernel_thread)
2019
2020	call4	schedule_tail
2021	mov	a6, a3
2022	callx4	a2
2023	j	common_exception_return
2024
2025ENDPROC(ret_from_kernel_thread)
2026