/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWSTART from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */
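
/*
 * For example, assuming WSBITS = 16: a mask of 0x0100 has its set bit
 * eight positions from the left of the 16-bit window field, so the macro
 * returns 8 (with NSA: nsau(0x0100) = 23, and 23 + 16 - 32 + 1 = 8).
 */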

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1   	# uppermost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm


	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm
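
/*
 * In all of the variants above, \flags ends up holding the PS value from
 * before INTLEVEL was raised to LOCKLEVEL, so the caller can later restore
 * the previous interrupt state with 'wsr \flags, ps' (as _switch_to does).
 */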

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */
	.literal_position

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

#if defined(USER_SUPPORT_WINDOWED)
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
#else
	movi	a2, 0
	movi	a3, 1
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	s32i	a3, a1, PT_WMASK
#endif

	/* Save only live registers. */

UABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
UABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
UABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#if defined(USER_SUPPORT_WINDOWED)
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set
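
	/* For example, if a2 = 0b1001001, then a3 = -(a2 - 1) & a2 = 0b0001000,
	 * i.e. the lowest '1' above bit 0 has been isolated.
	 */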

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */
#endif
2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
#endif

	/* Save only the live window-frame */

KABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
KABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
KABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#ifdef __XTENSA_WINDOWED_ABI__
	_bnei	a2, 1, 1f
	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_KERNEL_SIZE
	l32i	a0, a1, PT_KERNEL_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
#endif
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, NO_SYSCALL
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

#if XCHAL_HAVE_EXCLUSIVE
	/* Clear exclusive access monitor set by interrupted code */
	clrex
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable states are now saved on the stack and a1 is valid.
	 * We can allow exceptions again. In case we've got an interrupt,
	 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* The correct PS needs to be saved in PT_PS:
	 * - in case of an exception or level-1 interrupt it's in PS,
	 *   and is already saved.
	 * - in case of a medium-level interrupt it's in excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
KABI_W	movi	a0, PS_WOE_MASK
KABI_W	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
KABI_W	movi	a2, PS_WOE_MASK
KABI_W	or	a3, a3, a2
	rsr	a2, exccause
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr     a3, scompare1
	s32i    a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	rsr	a4, excsave1
	addx4	a4, a2, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
	mov	abi_arg1, a2			# pass EXCCAUSE
	mov	abi_arg0, a1			# pass stack frame

	/* Call the second-level handler */

	abi_callx	a4

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i	abi_tmp0, a1, PT_EXCCAUSE
	movi	abi_tmp1, EXCCAUSE_MAPPED_NMI
	l32i	abi_saved1, a1, PT_PS
	beq	abi_tmp0, abi_tmp1, .Lrestore_state
#endif
.Ltif_loop:
	irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i	abi_saved1, a1, PT_PS
	GET_THREAD_INFO(a2, a1)
	l32i	a4, a2, TI_FLAGS
	_bbci.l	abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l	a4, TIF_NEED_RESCHED, .Lresched
	movi	a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
	bnone	a4, a2, .Lexit_tif_loop_user

	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil	a2, 0
	mov	abi_arg0, a1
	abi_call	do_notify_resume	# int do_notify_resume(struct pt_regs*)
	j	.Ltif_loop

.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil	a2, 0
	abi_call	schedule	# void schedule (void)
	j	.Ltif_loop

.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
	_bbci.l	a4, TIF_NEED_RESCHED, .Lrestore_state

	/* Check current_thread_info->preempt_count */

	l32i	a4, a2, TI_PRE_COUNT
	bnez	a4, .Lrestore_state
	abi_call	preempt_schedule_irq
#endif
	j	.Lrestore_state

.Lexit_tif_loop_user:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l	a4, TIF_DB_DISABLED, 1f
	abi_call	restore_dbreak
1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
	abi_call	check_tlb_sanity
#endif

.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui	a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	a4, LOCKLEVEL, 1f
	abi_call	trace_hardirqs_on
1:
#endif
	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i    a2, a1, PT_SCOMPARE1
	wsr     a2, scompare1
#endif
	wsr	abi_saved1, ps		/* disable interrupts */

	_bbci.l	abi_saved1, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

#if defined(USER_SUPPORT_WINDOWED)
	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, .Lclear_regs	# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

1:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 1b

	/* Clear unrestored registers (don't leak anything to user-land). */

.Lclear_regs:
	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */
2:
#else
	movi	a2, 1
#endif
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
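
	/* For example, WINDOWSTART = 0b001000 gives 0b000111 AND 0b001000 = 0
	 * (only one bit set, i.e. the previous frame was spilled), while
	 * WINDOWSTART = 0b001100 gives 0b001011 AND 0b001100 = 0b001000 != 0.
	 */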

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr     a3, windowstart
	addi	a0, a3, -1
	and     a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi    a0, a1, -16
	l32i    a3, a0, 0
	l32i    a4, a0, 4
	s32i    a3, a1, PT_KERNEL_SIZE + 0
	s32i    a4, a1, PT_KERNEL_SIZE + 4
	l32i    a3, a0, 8
	l32i    a4, a0, 12
	s32i    a3, a1, PT_KERNEL_SIZE + 8
	s32i    a4, a1, PT_KERNEL_SIZE + 12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */
#else
	movi	a2, 1
#endif

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1: 	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

	.literal_position

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16 - PT_KERNEL_SIZE	# assume kernel stack
3:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when the
	 * window overflow/underflow handler or a fast exception handler hits
	 * a data breakpoint; in that case, save and disable all data
	 * breakpoints, single-step the faulting instruction, and restore
	 * the data breakpoints.
	 */
1:
	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
1:	j	1b	// FIXME!!
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

#if XCHAL_HAVE_WINDOWED
	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync
#endif

	movi	a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	abi_arg0, unrecoverable_text
	abi_call	panic

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

	__XTENSA_HANDLER
	.literal_position

#ifdef SUPPORT_WINDOWED
/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact, this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow exception
 * of the proper size instead.
 *
 * This algorithm simply backs out the register changes started by the user
 * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back, and then sets the old window base (OWB) in
 * the 'ps' register to the rolled-back window base. The 'movsp' instruction
 * will be re-executed, and this time, since the next window frame is in the
 * active AR registers, it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped and
 * the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync

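	/* The two topmost bits of the return address in a4 encode the size of
	 * the call that created the frame (call4/call8/call12), selecting the
	 * matching WindowUnderflow handler below.
	 */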
	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)
#endif

#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
 * Fast illegal instruction handler.
 *
 * This is used to fix up user PS.WOE on an exception caused
 * by the first opcode related to the register window. If PS.WOE is
 * already set, it goes directly to the common user exception handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_illegal_instruction_user)

	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, 1f
	s32i	a3, a2, PT_AREG3
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
	rfe
1:
	call0	user_exception

ENDPROC(fast_illegal_instruction_user)
#endif

/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	call0	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i    a0, a2, PT_AREG0        # restore a0
	xsr     a2, depc                # restore a2, depc

	wsr     a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */
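
/*
 * For example, assuming the usual syscall(2) wrapper, user space could do:
 *
 *	syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP, &word, old, new);
 *
 * which returns 1 if the swap was performed, 0 if *ptr did not contain
 * oldval, and -EFAULT or -EINVAL for a bad pointer or bad operation code.
 */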

	.literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
		defined(USER_SUPPORT_WINDOWED)

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a0, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a0			# holds WB
	slli	a0, a3, WSBITS
	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there is no frame other than the current one. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a0, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a0		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a0, a3
	and	a3, a0, a3		# first bit set from right: 000010000

	ffs_ws	a0, a3			# a0: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
	ssr	a0			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a0
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8, or
	 * call12), we have to save 4, 8, or 12 registers.
	 */


.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20
	srli	a11, a3, 2		# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 2 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a0, a5, -12
	s32e	a8, a0, -48
	mov	a8, a0

	s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a4, a13
	rotw	-1

	s32e	a4, a8, -16
	s32e	a5, a8, -12
	s32e	a6, a8, -8
	s32e	a7, a8, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Lexit:

	/* Done. Do the final rotation and set WS */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
.Lnospill:

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a4, a2, PT_AREG4
	l32i	a7, a2, PT_AREG7
	l32i	a8, a2, PT_AREG8
	l32i	a11, a2, PT_AREG11
	l32i	a12, a2, PT_AREG12
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers, so set up a dummy frame and kill the user application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	rsr	a3, excsave1
	l32i	a1, a3, EXC_TABLE_KSTK

	movi	a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	abi_arg0, SIGSEGV
	abi_call	make_task_dead

	/* shouldn't return, so panic */

	wsr	a0, excsave1
	call0	unrecoverable_exception		# should not return
1:	j	1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3              	# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE

	rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:
	call0	unrecoverable_exception		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 * 	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows mapping the three most common regions to three different
	 * DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */
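
	/* For example, a fault at excvaddr 0x2fff0000 (shared-library region,
	 * bits 28..29 = 2) yields 2 -> addx2 -> 6 -> extui -> 1, i.e. way
	 * DTLB_WAY_PGD + 1 (way 8, assuming DTLB_WAY_PGD is 7).
	 */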

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, 8b

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page,
	 * or one of the aliased cache flush functions was preempted by
	 * another task. Re-establish the temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a3. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */

	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a3, a1, 2f
	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
	rsr	a1, excvaddr
	s32i	a3, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a3, a0

	/* Exit critical section. */

	movi	a0, 0
	rsr	a3, excsave1
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a3, a2, PT_AREG3
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

	.text
/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */
	.literal_position

ENTRY(system_call)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry_default
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(12)

	s32i	a0, sp, 0
	s32i	abi_saved0, sp, 4
	s32i	abi_saved1, sp, 8
	mov	abi_saved0, a2
#else
#error Unsupported Xtensa ABI
#endif

	/* regs->syscall = regs->areg[2] */

	l32i	a7, abi_saved0, PT_AREG2
	s32i	a7, abi_saved0, PT_SYSCALL

	GET_THREAD_INFO(a4, a1)
	l32i	abi_saved1, a4, TI_FLAGS
	movi	a4, _TIF_WORK_MASK
	and	abi_saved1, abi_saved1, a4
	beqz	abi_saved1, 1f

	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_enter
	beqz	abi_rv, .Lsyscall_exit
	l32i	a7, abi_saved0, PT_SYSCALL

1:
	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscalls
	movi	abi_rv, -ENOSYS
	bgeu	a7, a5, 1f

	addx4	a4, a7, a4
	l32i	abi_tmp0, a4, 0

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	abi_arg0, abi_saved0, PT_AREG6
	l32i	abi_arg1, abi_saved0, PT_AREG3
	l32i	abi_arg2, abi_saved0, PT_AREG4
	l32i	abi_arg3, abi_saved0, PT_AREG5
	l32i	abi_arg4, abi_saved0, PT_AREG8
	l32i	abi_arg5, abi_saved0, PT_AREG9
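
	/* This ordering reflects the Xtensa syscall calling convention: the
	 * syscall number arrives in a2 and the six call arguments in a6, a3,
	 * a4, a5, a8, and a9 of the saved user frame.
	 */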

	abi_callx	abi_tmp0

1:	/* regs->areg[2] = return_value */

	s32i	abi_rv, abi_saved0, PT_AREG2
	bnez	abi_saved1, 1f
.Lsyscall_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a0, sp, 0
	l32i	abi_saved0, sp, 4
	l32i	abi_saved1, sp, 8
	abi_ret(12)
#else
#error Unsupported Xtensa ABI
#endif

1:
	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_leave
	j	.Lsyscall_exit

ENDPROC(system_call)

/*
 * Macro to spill live registers on the kernel stack.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has single bit set
 * May clobber: a12, a13
 */
	.macro	spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
	call12	1f
	_j	2f
	retw
	.align	4
1:
	_entry	a1, 48
	addi	a12, a0, 3
#if XCHAL_NUM_AREGS > 32
	.rept	(XCHAL_NUM_AREGS - 32) / 12
	_entry	a1, 48
	mov	a12, a0
	.endr
#endif
	_entry	a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a4, a4
#endif
	retw
2:
#else
	mov	a12, a12
#endif
	.endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                              a2                 a3
 */

ENTRY(_switch_to)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(16)

	s32i	a12, sp, 0
	s32i	a13, sp, 4
	s32i	a14, sp, 8
	s32i	a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
	mov	a11, a3			# preserve 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	movi	a6, __stack_chk_guard
	l32i	a8, a3, TASK_STACK_CANARY
	s32i	a8, a6, 0
#endif

	/* Disable ints while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

#if XCHAL_HAVE_EXCLUSIVE
	l32i	a3, a5, THREAD_ATOMCTL8
	getex	a3
	s32i	a3, a4, THREAD_ATOMCTL8
#endif

	/* Flush register file. */

#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a11, THREAD_RA	# restore return address
	l32i	a1, a11, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
	rsync

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a12, sp, 0
	l32i	a13, sp, 4
	l32i	a14, sp, 8
	l32i	a15, sp, 12
	abi_ret(16)
#else
#error Unsupported Xtensa ABI
#endif

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in abi_arg0 (return value from fake call frame)
	 */
	abi_call	schedule_tail

	mov		abi_arg0, a1
	abi_call	do_syscall_trace_leave
	j		common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: abi_saved0 = thread_fn,
 * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
 */
ENTRY(ret_from_kernel_thread)

	abi_call	schedule_tail
	mov		abi_arg0, abi_saved1
	abi_callx	abi_saved0
	j		common_exception_return

ENDPROC(ret_from_kernel_thread)
2150