xref: /openbmc/linux/arch/xtensa/kernel/entry.S (revision 11788d9b)
1/*
2 * Low-level exception handling
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License.  See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2004 - 2008 by Tensilica Inc.
9 * Copyright (C) 2015 Cadence Design Systems Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 *
13 */
14
15#include <linux/linkage.h>
16#include <linux/pgtable.h>
17#include <asm/asm-offsets.h>
18#include <asm/asmmacro.h>
19#include <asm/processor.h>
20#include <asm/coprocessor.h>
21#include <asm/thread_info.h>
22#include <asm/asm-uaccess.h>
23#include <asm/unistd.h>
24#include <asm/ptrace.h>
25#include <asm/current.h>
26#include <asm/page.h>
27#include <asm/signal.h>
28#include <asm/tlbflush.h>
29#include <variant/tie-asm.h>
30
31/* Unimplemented features. */
32
33#undef KERNEL_STACK_OVERFLOW_CHECK
34
35/* Not well tested.
36 *
37 * - fast_coprocessor
38 */
39
40/*
41 * Macro to find first bit set in WINDOWBASE from the left + 1
42 *
43 * 100....0 -> 1
44 * 010....0 -> 2
45 * 000....1 -> WSBITS
46 */
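/* Roughly, in C (a sketch, not kernel code; fls() here means the
 * 1-based index of the most significant set bit, as in the kernel's
 * fls() helper):
 *
 *	bit = WSBITS - fls(mask) + 1;
 */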
47
48	.macro ffs_ws bit mask
49
50#if XCHAL_HAVE_NSA
51	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
52	addi    \bit, \bit, WSBITS - 32 + 1   	# topmost bit set -> return 1
53#else
54	movi    \bit, WSBITS
55#if WSBITS > 16
56	_bltui  \mask, 0x10000, 99f
57	addi    \bit, \bit, -16
58	extui   \mask, \mask, 16, 16
59#endif
60#if WSBITS > 8
6199:	_bltui  \mask, 0x100, 99f
62	addi    \bit, \bit, -8
63	srli    \mask, \mask, 8
64#endif
6599:	_bltui  \mask, 0x10, 99f
66	addi    \bit, \bit, -4
67	srli    \mask, \mask, 4
6899:	_bltui  \mask, 0x4, 99f
69	addi    \bit, \bit, -2
70	srli    \mask, \mask, 2
7199:	_bltui  \mask, 0x2, 99f
72	addi    \bit, \bit, -1
7399:
74
75#endif
76	.endm
77
78
79	.macro	irq_save flags tmp
80#if XTENSA_FAKE_NMI
81#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
82	rsr	\flags, ps
83	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
84	bgei	\tmp, LOCKLEVEL, 99f
85	rsil	\tmp, LOCKLEVEL
8699:
87#else
88	movi	\tmp, LOCKLEVEL
89	rsr	\flags, ps
90	or	\flags, \flags, \tmp
91	xsr	\flags, ps
92	rsync
93#endif
94#else
95	rsil	\flags, LOCKLEVEL
96#endif
97	.endm
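/* In C terms, irq_save is roughly local_irq_save() (a sketch, using
 * hypothetical helpers):
 *
 *	flags = get_ps();
 *	set_ps_intlevel(LOCKLEVEL);
 *
 * The DEBUG_KERNEL variant additionally avoids lowering an INTLEVEL
 * that is already above LOCKLEVEL.
 */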
98
99/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
100
101/*
102 * First-level exception handler for user exceptions.
103 * Save some special registers, extra states and all registers in the AR
104 * register file that were in use in the user task, and jump to the common
105 * exception code.
106 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
107 * save them for kernel exceptions).
108 *
109 * Entry condition for user_exception:
110 *
111 *   a0:	trashed, original value saved on stack (PT_AREG0)
112 *   a1:	a1
113 *   a2:	new stack pointer, original value in depc
114 *   a3:	a3
115 *   depc:	a2, original value saved on stack (PT_DEPC)
116 *   excsave1:	dispatch table
117 *
118 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
119 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
120 *
121 * Entry condition for _user_exception:
122 *
123 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
124 *   excsave has been restored, and
125 *   stack pointer (a1) has been set.
126 *
127 * Note: _user_exception might be at an odd address. Don't use call0..call12
128 */
129	.literal_position
130
131ENTRY(user_exception)
132
133	/* Save a1, a2, a3, and set SP. */
134
135	rsr	a0, depc
136	s32i	a1, a2, PT_AREG1
137	s32i	a0, a2, PT_AREG2
138	s32i	a3, a2, PT_AREG3
139	mov	a1, a2
140
141	.globl _user_exception
142_user_exception:
143
144	/* Save SAR and turn off single stepping */
145
146	movi	a2, 0
147	wsr	a2, depc		# terminate user stack trace with 0
148	rsr	a3, sar
149	xsr	a2, icountlevel
150	s32i	a3, a1, PT_SAR
151	s32i	a2, a1, PT_ICOUNTLEVEL
152
153#if XCHAL_HAVE_THREADPTR
154	rur	a2, threadptr
155	s32i	a2, a1, PT_THREADPTR
156#endif
157
158	/* Rotate ws so that the current windowbase is at bit0. */
159	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
160
161	rsr	a2, windowbase
162	rsr	a3, windowstart
163	ssr	a2
164	s32i	a2, a1, PT_WINDOWBASE
165	s32i	a3, a1, PT_WINDOWSTART
166	slli	a2, a3, 32-WSBITS
167	src	a2, a3, a2
168	srli	a2, a2, 32-WSBITS
169	s32i	a2, a1, PT_WMASK	# needed for restoring registers
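	/* The slli/src/srli sequence above computes, in C terms (sketch):
	 *
	 *	wmask = ((ws >> wb) | (ws << (WSBITS - wb)))
	 *		& ((1 << WSBITS) - 1);
	 *
	 * i.e. WINDOWSTART rotated right by WINDOWBASE within a WSBITS-wide
	 * field, so that the current frame's bit lands at bit 0.
	 */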
170
171	/* Save only live registers. */
172
173	_bbsi.l	a2, 1, 1f
174	s32i	a4, a1, PT_AREG4
175	s32i	a5, a1, PT_AREG5
176	s32i	a6, a1, PT_AREG6
177	s32i	a7, a1, PT_AREG7
178	_bbsi.l	a2, 2, 1f
179	s32i	a8, a1, PT_AREG8
180	s32i	a9, a1, PT_AREG9
181	s32i	a10, a1, PT_AREG10
182	s32i	a11, a1, PT_AREG11
183	_bbsi.l	a2, 3, 1f
184	s32i	a12, a1, PT_AREG12
185	s32i	a13, a1, PT_AREG13
186	s32i	a14, a1, PT_AREG14
187	s32i	a15, a1, PT_AREG15
188	_bnei	a2, 1, 1f		# only one valid frame?
189
190	/* Only one valid frame, skip saving regs. */
191
192	j	2f
193
194	/* Save the remaining registers.
195	 * We have to save all registers up to the first '1' from
196	 * the right, except the current frame (bit 0).
197	 * Assume a2 is:  001001000110001
198	 * All register frames starting from the top field to the marked '1'
199	 * must be saved.
200	 */
201
2021:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
203	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
204	and	a3, a3, a2		# max. only one bit is set
205
206	/* Find number of frames to save */
207
208	ffs_ws	a0, a3			# number of frames to the '1' from left
209
210	/* Store information into WMASK:
211	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
212	 * bits 4...: number of valid 4-register frames
213	 */
214
215	slli	a3, a0, 4		# number of frames to save in bits 8..4
216	extui	a2, a2, 0, 4		# mask for the first 16 registers
217	or	a2, a3, a2
218	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
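	/* In C terms (sketch), WMASK now encodes both pieces:
	 *
	 *	wmask = (frames_to_save << 4) | (rotated_ws & 0xf);
	 */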
219
220	/* Save 4 registers at a time */
221
2221:	rotw	-1
223	s32i	a0, a5, PT_AREG_END - 16
224	s32i	a1, a5, PT_AREG_END - 12
225	s32i	a2, a5, PT_AREG_END - 8
226	s32i	a3, a5, PT_AREG_END - 4
227	addi	a0, a4, -1
228	addi	a1, a5, -16
229	_bnez	a0, 1b
230
231	/* WINDOWBASE still in SAR! */
232
233	rsr	a2, sar			# original WINDOWBASE
234	movi	a3, 1
235	ssl	a2
236	sll	a3, a3
237	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
238	wsr	a2, windowbase		# and WINDOWBASE
239	rsync
240
241	/* We are back to the original stack pointer (a1) */
242
2432:	/* Now, jump to the common exception handler. */
244
245	j	common_exception
246
247ENDPROC(user_exception)
248
249/*
250 * First-level exception handler for kernel exceptions.
251 * Save special registers and the live window frame.
252 * Note: Even though we change the stack pointer, we don't have to do a
253 *	 MOVSP here, as we do that when we return from the exception.
254 *	 (See comment in the kernel exception exit code)
255 *
256 * Entry condition for kernel_exception:
257 *
258 *   a0:	trashed, original value saved on stack (PT_AREG0)
259 *   a1:	a1
260 *   a2:	new stack pointer, original in DEPC
261 *   a3:	a3
262 *   depc:	a2, original value saved on stack (PT_DEPC)
263 *   excsave_1:	dispatch table
264 *
265 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
266 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
267 *
268 * Entry condition for _kernel_exception:
269 *
270 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
271 *   excsave has been restored, and
272 *   stack pointer (a1) has been set.
273 *
274 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
275 */
276
277ENTRY(kernel_exception)
278
279	/* Save a1, a2, a3, and set SP. */
280
281	rsr	a0, depc		# get a2
282	s32i	a1, a2, PT_AREG1
283	s32i	a0, a2, PT_AREG2
284	s32i	a3, a2, PT_AREG3
285	mov	a1, a2
286
287	.globl _kernel_exception
288_kernel_exception:
289
290	/* Save SAR and turn off single stepping */
291
292	movi	a2, 0
293	rsr	a3, sar
294	xsr	a2, icountlevel
295	s32i	a3, a1, PT_SAR
296	s32i	a2, a1, PT_ICOUNTLEVEL
297
298	/* Rotate ws so that the current windowbase is at bit0. */
299	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
300
301	rsr	a2, windowbase		# don't need to save these, we only
302	rsr	a3, windowstart		# need shifted windowstart: windowmask
303	ssr	a2
304	slli	a2, a3, 32-WSBITS
305	src	a2, a3, a2
306	srli	a2, a2, 32-WSBITS
307	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
308
309	/* Save only the live window-frame */
310
311	_bbsi.l	a2, 1, 1f
312	s32i	a4, a1, PT_AREG4
313	s32i	a5, a1, PT_AREG5
314	s32i	a6, a1, PT_AREG6
315	s32i	a7, a1, PT_AREG7
316	_bbsi.l	a2, 2, 1f
317	s32i	a8, a1, PT_AREG8
318	s32i	a9, a1, PT_AREG9
319	s32i	a10, a1, PT_AREG10
320	s32i	a11, a1, PT_AREG11
321	_bbsi.l	a2, 3, 1f
322	s32i	a12, a1, PT_AREG12
323	s32i	a13, a1, PT_AREG13
324	s32i	a14, a1, PT_AREG14
325	s32i	a15, a1, PT_AREG15
326
327	_bnei	a2, 1, 1f
328
329	/* Copy spill slots of a0 and a1 to imitate movsp
330	 * in order to keep the exception stack contiguous
331	 */
332	l32i	a3, a1, PT_SIZE
333	l32i	a0, a1, PT_SIZE + 4
334	s32e	a3, a1, -16
335	s32e	a0, a1, -12
3361:
337	l32i	a0, a1, PT_AREG0	# restore saved a0
338	wsr	a0, depc
339
340#ifdef KERNEL_STACK_OVERFLOW_CHECK
341
342	/*  Stack overflow check, for debugging  */
343	extui	a2, a1, TASK_SIZE_BITS,XX
344	movi	a3, SIZE??
345	_bge	a2, a3, out_of_stack_panic
346
347#endif
348
349/*
350 * This is the common exception handler.
351 * We get here from the user exception handler or simply by falling through
352 * from the kernel exception handler.
353 * Save the remaining special registers, switch to kernel mode, and jump
354 * to the second-level exception handler.
355 *
356 */
357
358common_exception:
359
360	/* Save some registers, disable loops and clear the syscall flag. */
361
362	rsr	a2, debugcause
363	rsr	a3, epc1
364	s32i	a2, a1, PT_DEBUGCAUSE
365	s32i	a3, a1, PT_PC
366
367	movi	a2, NO_SYSCALL
368	rsr	a3, excvaddr
369	s32i	a2, a1, PT_SYSCALL
370	movi	a2, 0
371	s32i	a3, a1, PT_EXCVADDR
372#if XCHAL_HAVE_LOOPS
373	xsr	a2, lcount
374	s32i	a2, a1, PT_LCOUNT
375#endif
376
377#if XCHAL_HAVE_EXCLUSIVE
378	/* Clear exclusive access monitor set by interrupted code */
379	clrex
380#endif
381
382	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
383
384	rsr	a2, exccause
385	movi	a3, 0
386	rsr	a0, excsave1
387	s32i	a2, a1, PT_EXCCAUSE
388	s32i	a3, a0, EXC_TABLE_FIXUP
389
390	/* All unrecoverable states are saved on the stack now, and a1 is valid.
391	 * Now we can allow exceptions again. In case we've got an interrupt,
392	 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
393	 * otherwise it's left unchanged.
394	 *
395	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
396	 */
397
398	rsr	a3, ps
399	s32i	a3, a1, PT_PS		# save ps
400
401#if XTENSA_FAKE_NMI
402	/* The correct PS needs to be saved in PT_PS:
403	 * - in case of exception or level-1 interrupt it's in the PS,
404	 *   and is already saved.
405	 * - in case of medium level interrupt it's in the excsave2.
406	 */
407	movi	a0, EXCCAUSE_MAPPED_NMI
408	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
409	beq	a2, a0, .Lmedium_level_irq
410	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
411	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
412
413.Lmedium_level_irq:
414	rsr	a0, excsave2
415	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
416	bgei	a3, LOCKLEVEL, .Lexception
417
418.Llevel1_irq:
419	movi	a3, LOCKLEVEL
420
421.Lexception:
422	movi	a0, PS_WOE_MASK
423	or	a3, a3, a0
424#else
425	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
426	movi	a0, LOCKLEVEL
427	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
428					# a3 = PS.INTLEVEL
429	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
430	movi	a2, PS_WOE_MASK
431	or	a3, a3, a2
432	rsr	a2, exccause
433#endif
434
435	/* restore return address (or 0 if return to userspace) */
436	rsr	a0, depc
437	wsr	a3, ps
438	rsync				# PS.WOE => rsync => overflow
439
440	/* Save lbeg, lend */
441#if XCHAL_HAVE_LOOPS
442	rsr	a4, lbeg
443	rsr	a3, lend
444	s32i	a4, a1, PT_LBEG
445	s32i	a3, a1, PT_LEND
446#endif
447
448	/* Save SCOMPARE1 */
449
450#if XCHAL_HAVE_S32C1I
451	rsr     a3, scompare1
452	s32i    a3, a1, PT_SCOMPARE1
453#endif
454
455	/* Save optional registers. */
456
457	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
458
459	/* Go to second-level dispatcher. Set up parameters to pass to the
460	 * exception handler and call the exception handler.
461	 */
462
463	rsr	a4, excsave1
464	mov	a6, a1			# pass stack frame
465	mov	a7, a2			# pass EXCCAUSE
466	addx4	a4, a2, a4
467	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
468
469	/* Call the second-level handler */
470
471	callx4	a4
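	/* In C terms, the dispatch above is roughly (sketch; exc_table is
	 * the table pointed to by EXCSAVE1):
	 *
	 *	handler = *(void **)((char *)exc_table
	 *			     + EXC_TABLE_DEFAULT + exccause * 4);
	 *	handler(regs, exccause);
	 */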
472
473	/* Jump here for exception exit */
474	.global common_exception_return
475common_exception_return:
476
477#if XTENSA_FAKE_NMI
478	l32i	a2, a1, PT_EXCCAUSE
479	movi	a3, EXCCAUSE_MAPPED_NMI
480	beq	a2, a3, .LNMIexit
481#endif
4821:
483	irq_save a2, a3
484#ifdef CONFIG_TRACE_IRQFLAGS
485	call4	trace_hardirqs_off
486#endif
487
488	/* Jump if we are returning from kernel exceptions. */
489
490	l32i	a3, a1, PT_PS
491	GET_THREAD_INFO(a2, a1)
492	l32i	a4, a2, TI_FLAGS
493	_bbci.l	a3, PS_UM_BIT, 6f
494
495	/* Specific to a user exception exit:
496	 * We need to check some flags for signal handling and rescheduling,
497	 * and have to restore WB and WS, extra states, and all registers
498	 * in the register file that were in use in the user task.
499	 * Note that we don't disable interrupts here.
500	 */
501
502	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
503	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
504	_bbci.l	a4, TIF_SIGPENDING, 5f
505
5062:	l32i	a4, a1, PT_DEPC
507	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
508
509	/* Call do_notify_resume() */
510
511#ifdef CONFIG_TRACE_IRQFLAGS
512	call4	trace_hardirqs_on
513#endif
514	rsil	a2, 0
515	mov	a6, a1
516	call4	do_notify_resume	# int do_notify_resume(struct pt_regs*)
517	j	1b
518
5193:	/* Reschedule */
520
521#ifdef CONFIG_TRACE_IRQFLAGS
522	call4	trace_hardirqs_on
523#endif
524	rsil	a2, 0
525	call4	schedule	# void schedule (void)
526	j	1b
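	/* The flag tests above implement the usual return-to-user work loop,
	 * roughly (C sketch):
	 *
	 *	while (ti->flags & (_TIF_NEED_RESCHED |
	 *			    _TIF_NOTIFY_RESUME | _TIF_SIGPENDING)) {
	 *		if (ti->flags & _TIF_NEED_RESCHED)
	 *			schedule();
	 *		else
	 *			do_notify_resume(regs);
	 *	}
	 */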
527
528#ifdef CONFIG_PREEMPTION
5296:
530	_bbci.l	a4, TIF_NEED_RESCHED, 4f
531
532	/* Check current_thread_info->preempt_count */
533
534	l32i	a4, a2, TI_PRE_COUNT
535	bnez	a4, 4f
536	call4	preempt_schedule_irq
537	j	4f
538#endif
539
540#if XTENSA_FAKE_NMI
541.LNMIexit:
542	l32i	a3, a1, PT_PS
543	_bbci.l	a3, PS_UM_BIT, 4f
544#endif
545
5465:
547#ifdef CONFIG_HAVE_HW_BREAKPOINT
548	_bbci.l	a4, TIF_DB_DISABLED, 7f
549	call4	restore_dbreak
5507:
551#endif
552#ifdef CONFIG_DEBUG_TLB_SANITY
553	l32i	a4, a1, PT_DEPC
554	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
555	call4	check_tlb_sanity
556#endif
5576:
5584:
559#ifdef CONFIG_TRACE_IRQFLAGS
560	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
561	bgei	a4, LOCKLEVEL, 1f
562	call4	trace_hardirqs_on
5631:
564#endif
565	/* Restore optional registers. */
566
567	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
568
569	/* Restore SCOMPARE1 */
570
571#if XCHAL_HAVE_S32C1I
572	l32i    a2, a1, PT_SCOMPARE1
573	wsr     a2, scompare1
574#endif
575	wsr	a3, ps		/* disable interrupts */
576
577	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
578
579user_exception_exit:
580
581	/* Restore the state of the task and return from the exception. */
582
583	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
584
585	l32i	a2, a1, PT_WINDOWBASE
586	l32i	a3, a1, PT_WINDOWSTART
587	wsr	a1, depc		# use DEPC as temp storage
588	wsr	a3, windowstart		# restore WINDOWSTART
589	ssr	a2			# preserve user's WB in the SAR
590	wsr	a2, windowbase		# switch to user's saved WB
591	rsync
592	rsr	a1, depc		# restore stack pointer
593	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
594	rotw	-1			# we restore a4..a7
595	_bltui	a6, 16, 1f		# only have to restore current window?
596
597	/* The working registers are a0 and a3.  We are restoring to
598	 * a4..a7.  Be careful not to destroy what we have just restored.
599	 * Note: wmask has the format YYYYM:
600	 *       Y: number of registers saved in groups of 4
601	 *       M: 4 bit mask of first 16 registers
602	 */
603
604	mov	a2, a6
605	mov	a3, a5
606
6072:	rotw	-1			# a0..a3 become a4..a7
608	addi	a3, a7, -4*4		# next iteration
609	addi	a2, a6, -16		# decrementing Y in WMASK
610	l32i	a4, a3, PT_AREG_END + 0
611	l32i	a5, a3, PT_AREG_END + 4
612	l32i	a6, a3, PT_AREG_END + 8
613	l32i	a7, a3, PT_AREG_END + 12
614	_bgeui	a2, 16, 2b
615
616	/* Clear unrestored registers (don't leak anything to user-land) */
617
6181:	rsr	a0, windowbase
619	rsr	a3, sar
620	sub	a3, a0, a3
621	beqz	a3, 2f
622	extui	a3, a3, 0, WBBITS
623
6241:	rotw	-1
625	addi	a3, a7, -1
626	movi	a4, 0
627	movi	a5, 0
628	movi	a6, 0
629	movi	a7, 0
630	bgei	a3, 1, 1b
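	/* Sketch of the loop above in C terms: the distance from the current
	 * WINDOWBASE back to the user's WINDOWBASE (still in SAR) is the
	 * number of 4-register frames that were never reloaded:
	 *
	 *	for (n = (wb - user_wb) & ((1 << WBBITS) - 1); n > 0; n--)
	 *		rotw(-1), a4 = a5 = a6 = a7 = 0;
	 */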
631
632	/* We are back where we were when we started.
633	 * Note: a2 still contains WMASK (if we've returned to the original
634	 *	 frame where we had loaded a2), or at least the lower 4 bits
635	 *	 (if we have restored WSBITS-1 frames).
636	 */
637
6382:
639#if XCHAL_HAVE_THREADPTR
640	l32i	a3, a1, PT_THREADPTR
641	wur	a3, threadptr
642#endif
643
644	j	common_exception_exit
645
646	/* This is the kernel exception exit.
647	 * We avoided doing a MOVSP when we entered the exception, but we
648	 * have to do it here.
649	 */
650
651kernel_exception_exit:
652
653	/* Check if we have to do a movsp.
654	 *
655	 * We only have to do a movsp if the previous window-frame has
656	 * been spilled to the *temporary* exception stack instead of the
657	 * task's stack. This is the case if the corresponding bit in
658	 * WINDOWSTART for the previous window-frame was set before
659	 * (not spilled) but is zero now (spilled).
660	 * If this bit is zero, all other bits except the one for the
661	 * current window frame are also zero. So, we can use a simple test:
662	 * 'and' WINDOWSTART and WINDOWSTART-1:
663	 *
664	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
665	 *
666	 * The result is zero only if one bit was set.
667	 *
668	 * (Note: We might have gone through several task switches before
669	 *        we come back to the current task, so WINDOWBASE might be
670	 *        different from the time the exception occurred.)
671	 */
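	/* In C terms (sketch): the movsp is needed iff (ws & (ws - 1)) == 0,
	 * i.e. only the current frame's bit is left set in WINDOWSTART.
	 */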
672
673	/* Test WINDOWSTART before and after the exception.
674	 * We actually have WMASK, so we only have to test if it is 1 or not.
675	 */
676
677	l32i	a2, a1, PT_WMASK
678	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump
679
680	/* Test WINDOWSTART now. If spilled, do the movsp */
681
682	rsr     a3, windowstart
683	addi	a0, a3, -1
684	and     a3, a3, a0
685	_bnez	a3, common_exception_exit
686
687	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */
688
689	addi    a0, a1, -16
690	l32i    a3, a0, 0
691	l32i    a4, a0, 4
692	s32i    a3, a1, PT_SIZE+0
693	s32i    a4, a1, PT_SIZE+4
694	l32i    a3, a0, 8
695	l32i    a4, a0, 12
696	s32i    a3, a1, PT_SIZE+8
697	s32i    a4, a1, PT_SIZE+12
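	/* Equivalent C sketch of the copy above (sp is the pt_regs frame):
	 *
	 *	memcpy((char *)sp + PT_SIZE, (char *)sp - 16, 16);
	 */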
698
699	/* Common exception exit.
700	 * We restore the special registers and the current window frame, and
701	 * return from the exception.
702	 *
703	 * Note: We expect a2 to hold PT_WMASK
704	 */
705
706common_exception_exit:
707
708	/* Restore address registers. */
709
710	_bbsi.l	a2, 1, 1f
711	l32i	a4,  a1, PT_AREG4
712	l32i	a5,  a1, PT_AREG5
713	l32i	a6,  a1, PT_AREG6
714	l32i	a7,  a1, PT_AREG7
715	_bbsi.l	a2, 2, 1f
716	l32i	a8,  a1, PT_AREG8
717	l32i	a9,  a1, PT_AREG9
718	l32i	a10, a1, PT_AREG10
719	l32i	a11, a1, PT_AREG11
720	_bbsi.l	a2, 3, 1f
721	l32i	a12, a1, PT_AREG12
722	l32i	a13, a1, PT_AREG13
723	l32i	a14, a1, PT_AREG14
724	l32i	a15, a1, PT_AREG15
725
726	/* Restore PC, SAR */
727
7281:	l32i	a2, a1, PT_PC
729	l32i	a3, a1, PT_SAR
730	wsr	a2, epc1
731	wsr	a3, sar
732
733	/* Restore LBEG, LEND, LCOUNT */
734#if XCHAL_HAVE_LOOPS
735	l32i	a2, a1, PT_LBEG
736	l32i	a3, a1, PT_LEND
737	wsr	a2, lbeg
738	l32i	a2, a1, PT_LCOUNT
739	wsr	a3, lend
740	wsr	a2, lcount
741#endif
742
743	/* We control single stepping through the ICOUNTLEVEL register. */
744
745	l32i	a2, a1, PT_ICOUNTLEVEL
746	movi	a3, -2
747	wsr	a2, icountlevel
748	wsr	a3, icount
749
750	/* Check if it was a double exception. */
751
752	l32i	a0, a1, PT_DEPC
753	l32i	a3, a1, PT_AREG3
754	l32i	a2, a1, PT_AREG2
755	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
756
757	/* Restore a0...a3 and return */
758
759	l32i	a0, a1, PT_AREG0
760	l32i	a1, a1, PT_AREG1
761	rfe
762
7631: 	wsr	a0, depc
764	l32i	a0, a1, PT_AREG0
765	l32i	a1, a1, PT_AREG1
766	rfde
767
768ENDPROC(kernel_exception)
769
770/*
771 * Debug exception handler.
772 *
773 * Currently, we don't support KGDB, so only user applications can be debugged.
774 *
775 * When we get here, a0 is trashed and saved to excsave[debuglevel]
776 */
777
778	.literal_position
779
780ENTRY(debug_exception)
781
782	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
783	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
784
785	/* Set EPC1 and EXCCAUSE */
786
787	wsr	a2, depc		# save a2 temporarily
788	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
789	wsr	a2, epc1
790
791	movi	a2, EXCCAUSE_MAPPED_DEBUG
792	wsr	a2, exccause
793
794	/* Restore PS to the value before the debug exception, but with PS.EXCM set. */
795
796	movi	a2, 1 << PS_EXCM_BIT
797	or	a2, a0, a2
798	wsr	a2, ps
799
800	/* Switch to kernel/user stack, restore jump vector, and save a0 */
801
802	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
803
804	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
8053:
806	l32i	a0, a3, DT_DEBUG_SAVE
807	s32i	a1, a2, PT_AREG1
808	s32i	a0, a2, PT_AREG0
809	movi	a0, 0
810	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
811	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
812	xsr	a0, depc
813	s32i	a3, a2, PT_AREG3
814	s32i	a0, a2, PT_AREG2
815	mov	a1, a2
816
817	/* Debug exception is handled as an exception, so interrupts will
818	 * likely be enabled in the common exception handler. Disable
819	 * preemption if we have HW breakpoints, to preserve the meaning of
820	 * DEBUGCAUSE.DBNUM.
821	 */
822#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
823	GET_THREAD_INFO(a2, a1)
824	l32i	a3, a2, TI_PRE_COUNT
825	addi	a3, a3, 1
826	s32i	a3, a2, TI_PRE_COUNT
827#endif
828
829	rsr	a2, ps
830	bbsi.l	a2, PS_UM_BIT, _user_exception
831	j	_kernel_exception
832
8332:	rsr	a2, excsave1
834	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
835	j	3b
836
837#ifdef CONFIG_HAVE_HW_BREAKPOINT
838	/* Debug exception while in exception mode. This may happen when the
839	 * window overflow/underflow handler or a fast exception handler hits
840	 * a data breakpoint, in which case we save and disable all data
841	 * breakpoints, single-step the faulting instruction and restore the
842	 * data breakpoints.
843	 */
8441:
845	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode
846
847	rsr	a0, debugcause
848	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
849
850	.set	_index, 0
851	.rept	XCHAL_NUM_DBREAK
852	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
853	wsr	a0, SREG_DBREAKC + _index
854	.set	_index, _index + 1
855	.endr
856
857	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
858	wsr	a0, icountlevel
859
860	l32i	a0, a3, DT_ICOUNT_SAVE
861	xsr	a0, icount
862
863	l32i	a0, a3, DT_DEBUG_SAVE
864	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
865	rfi	XCHAL_DEBUGLEVEL
866
867.Ldebug_save_dbreak:
868	.set	_index, 0
869	.rept	XCHAL_NUM_DBREAK
870	movi	a0, 0
871	xsr	a0, SREG_DBREAKC + _index
872	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
873	.set	_index, _index + 1
874	.endr
875
876	movi	a0, XCHAL_EXCM_LEVEL + 1
877	xsr	a0, icountlevel
878	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
879
880	movi	a0, 0xfffffffe
881	xsr	a0, icount
882	s32i	a0, a3, DT_ICOUNT_SAVE
883
884	l32i	a0, a3, DT_DEBUG_SAVE
885	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
886	rfi	XCHAL_DEBUGLEVEL
887#else
888	/* Debug exception while in exception mode. Should not happen. */
8891:	j	1b	// FIXME!!
890#endif
891
892ENDPROC(debug_exception)
893
894/*
895 * We get here in case of an unrecoverable exception.
896 * The only thing we can do is to be nice and print a panic message.
897 * We only produce a single stack frame for panic, so ???
898 *
899 *
900 * Entry conditions:
901 *
902 *   - a0 contains the caller address; original value saved in excsave1.
903 *   - the original a0 contains a valid return address (backtrace) or 0.
904 *   - a2 contains a valid stack pointer
905 *
906 * Notes:
907 *
908 *   - If the stack pointer could be invalid, the caller has to set up a
909 *     dummy stack pointer (e.g. the stack of the init_task)
910 *
911 *   - If the return address could be invalid, the caller has to set it
912 *     to 0, so the backtrace would stop.
913 *
914 */
915	.align 4
916unrecoverable_text:
917	.ascii "Unrecoverable error in exception handler\0"
918
919	.literal_position
920
921ENTRY(unrecoverable_exception)
922
923	movi	a0, 1
924	movi	a1, 0
925
926	wsr	a0, windowstart
927	wsr	a1, windowbase
928	rsync
929
930	movi	a1, PS_WOE_MASK | LOCKLEVEL
931	wsr	a1, ps
932	rsync
933
934	movi	a1, init_task
935	movi	a0, 0
936	addi	a1, a1, PT_REGS_OFFSET
937
938	movi	a6, unrecoverable_text
939	call4	panic
940
9411:	j	1b
942
943ENDPROC(unrecoverable_exception)
944
945/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
946
947	__XTENSA_HANDLER
948	.literal_position
949
950/*
951 * Fast-handler for alloca exceptions
952 *
953 *  The ALLOCA handler is entered when user code executes the MOVSP
954 *  instruction and the caller's frame is not in the register file.
955 *
956 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
957 *
958 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
959 *
960 * It leverages the existing window spill/fill routines and their support for
961 * double exceptions. The 'movsp' instruction will only cause an exception if
962 * the next window needs to be loaded. In fact this ALLOCA exception may be
963 * replaced at some point by changing the hardware to do an underflow exception
964 * of the proper size instead.
965 *
966 * This algorithm simply backs out the register changes started by the user
967 * exception handler, makes it appear that we have started a window underflow
968 * by rotating the window back and then setting the old window base (OWB) in
969 * the 'ps' register with the rolled back window base. The 'movsp' instruction
970 * will be re-executed, and this time, since the next window frame is in the
971 * active AR registers, it won't cause an exception.
972 *
973 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
974 * and the partial WindowUnderflow will be handled in the double exception
975 * handler.
976 *
977 * Entry condition:
978 *
979 *   a0:	trashed, original value saved on stack (PT_AREG0)
980 *   a1:	a1
981 *   a2:	new stack pointer, original in DEPC
982 *   a3:	a3
983 *   depc:	a2, original value saved on stack (PT_DEPC)
984 *   excsave_1:	dispatch table
985 *
986 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
987 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
988 */
989
990ENTRY(fast_alloca)
991	rsr	a0, windowbase
992	rotw	-1
993	rsr	a2, ps
994	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
995	xor	a3, a3, a4
996	l32i	a4, a6, PT_AREG0
997	l32i	a1, a6, PT_DEPC
998	rsr	a6, depc
999	wsr	a1, depc
1000	slli	a3, a3, PS_OWB_SHIFT
1001	xor	a2, a2, a3
1002	wsr	a2, ps
1003	rsync
1004
1005	_bbci.l	a4, 31, 4f
1006	rotw	-1
1007	_bbci.l	a8, 30, 8f
1008	rotw	-1
1009	j	_WindowUnderflow12
10108:	j	_WindowUnderflow8
10114:	j	_WindowUnderflow4
1012ENDPROC(fast_alloca)
1013
1014#ifdef CONFIG_USER_ABI_CALL0_PROBE
1015/*
1016 * fast illegal instruction handler.
1017 *
1018 * This is used to fix up user PS.WOE on the exception caused
1019 * by the first opcode related to the register window. If PS.WOE is
1020 * already set, it goes directly to the common user exception handler.
1021 *
1022 * Entry condition:
1023 *
1024 *   a0:	trashed, original value saved on stack (PT_AREG0)
1025 *   a1:	a1
1026 *   a2:	new stack pointer, original in DEPC
1027 *   a3:	a3
1028 *   depc:	a2, original value saved on stack (PT_DEPC)
1029 *   excsave_1:	dispatch table
1030 */
1031
1032ENTRY(fast_illegal_instruction_user)
1033
1034	rsr	a0, ps
1035	bbsi.l	a0, PS_WOE_BIT, 1f
1036	s32i	a3, a2, PT_AREG3
1037	movi	a3, PS_WOE_MASK
1038	or	a0, a0, a3
1039	wsr	a0, ps
1040	l32i	a3, a2, PT_AREG3
1041	l32i	a0, a2, PT_AREG0
1042	rsr	a2, depc
1043	rfe
10441:
1045	call0	user_exception
1046
1047ENDPROC(fast_illegal_instruction_user)
1048#endif
1049
1050/*
1051 * fast system calls.
1052 *
1053 * WARNING:  The kernel doesn't save the entire user context before
1054 * handling a fast system call.  These functions are small and short,
1055 * usually offering some functionality not available to user tasks.
1056 *
1057 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
1058 *
1059 * Entry condition:
1060 *
1061 *   a0:	trashed, original value saved on stack (PT_AREG0)
1062 *   a1:	a1
1063 *   a2:	new stack pointer, original in DEPC
1064 *   a3:	a3
1065 *   depc:	a2, original value saved on stack (PT_DEPC)
1066 *   excsave_1:	dispatch table
1067 */
1068
1069ENTRY(fast_syscall_user)
1070
1071	/* Skip syscall. */
1072
1073	rsr	a0, epc1
1074	addi	a0, a0, 3
1075	wsr	a0, epc1
1076
1077	l32i	a0, a2, PT_DEPC
1078	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1079
1080	rsr	a0, depc			# get syscall-nr
1081	_beqz	a0, fast_syscall_spill_registers
1082	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
1083
1084	call0	user_exception
1085
1086ENDPROC(fast_syscall_user)
1087
1088ENTRY(fast_syscall_unrecoverable)
1089
1090	/* Restore all states. */
1091
1092	l32i    a0, a2, PT_AREG0        # restore a0
1093	xsr     a2, depc                # restore a2, depc
1094
1095	wsr     a0, excsave1
1096	call0	unrecoverable_exception
1097
1098ENDPROC(fast_syscall_unrecoverable)
1099
1100/*
1101 * sysxtensa syscall handler
1102 *
1103 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
1104 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
1105 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
1106 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1107 *        a2            a6                   a3    a4      a5
1108 *
1109 * Entry condition:
1110 *
1111 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
1112 *   a1:	a1
1113 *   a2:	new stack pointer, original in a0 and DEPC
1114 *   a3:	a3
1115 *   a4..a15:	unchanged
1116 *   depc:	a2, original value saved on stack (PT_DEPC)
1117 *   excsave_1:	dispatch table
1118 *
1119 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1120 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1121 *
1122 * Note: we don't have to save a2; a2 holds the return value
1123 */
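/* Userspace usage example (a sketch, assuming the usual syscall(2)
 * wrapper; CMP_SWP returns 1/0, the set/add/exg_add variants return
 * the original value, and errors are negative):
 *
 *	ret = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
 *		      &word, oldval, newval);
 */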
1124
1125	.literal_position
1126
1127#ifdef CONFIG_FAST_SYSCALL_XTENSA
1128
1129ENTRY(fast_syscall_xtensa)
1130
1131	s32i	a7, a2, PT_AREG7	# we need an additional register
1132	movi	a7, 4			# sizeof(unsigned int)
1133	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
1134
1135	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
1136	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
1137
1138	/* Fall through for ATOMIC_CMP_SWP. */
1139
1140.Lswp:	/* Atomic compare and swap */
1141
1142EX(.Leac) l32i	a0, a3, 0		# read old value
1143	bne	a0, a4, 1f		# same as old value? jump
1144EX(.Leac) s32i	a5, a3, 0		# different, modify value
1145	l32i	a7, a2, PT_AREG7	# restore a7
1146	l32i	a0, a2, PT_AREG0	# restore a0
1147	movi	a2, 1			# and return 1
1148	rfe
1149
11501:	l32i	a7, a2, PT_AREG7	# restore a7
1151	l32i	a0, a2, PT_AREG0	# restore a0
1152	movi	a2, 0			# and return 0
1153	rfe
1154
1155.Lnswp:	/* Atomic set, add, and exg_add. */
1156
1157EX(.Leac) l32i	a7, a3, 0		# orig
1158	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
1159	add	a0, a4, a7		# + arg
1160	moveqz	a0, a4, a6		# set
1161	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
1162EX(.Leac) s32i	a0, a3, 0		# write new value
1163
1164	mov	a0, a2
1165	mov	a2, a7
1166	l32i	a7, a0, PT_AREG7	# restore a7
1167	l32i	a0, a0, PT_AREG0	# restore a0
1168	rfe
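	/* .Lnswp above, in C terms (sketch):
	 *
	 *	orig = *ptr;
	 *	*ptr = (op == SYS_XTENSA_ATOMIC_SET) ? arg : orig + arg;
	 *	return orig;
	 */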
1169
1170.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
1171	l32i	a0, a2, PT_AREG0	# restore a0
1172	movi	a2, -EFAULT
1173	rfe
1174
1175.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
1176	l32i	a0, a2, PT_AREG0	# restore a0
1177	movi	a2, -EINVAL
1178	rfe
1179
1180ENDPROC(fast_syscall_xtensa)
1181
1182#else /* CONFIG_FAST_SYSCALL_XTENSA */
1183
1184ENTRY(fast_syscall_xtensa)
1185
1186	l32i    a0, a2, PT_AREG0        # restore a0
1187	movi	a2, -ENOSYS
1188	rfe
1189
1190ENDPROC(fast_syscall_xtensa)
1191
1192#endif /* CONFIG_FAST_SYSCALL_XTENSA */
1193
1194
1195/* fast_syscall_spill_registers.
1196 *
1197 * Entry condition:
1198 *
1199 *   a0:	trashed, original value saved on stack (PT_AREG0)
1200 *   a1:	a1
1201 *   a2:	new stack pointer, original in DEPC
1202 *   a3:	a3
1203 *   depc:	a2, original value saved on stack (PT_DEPC)
1204 *   excsave_1:	dispatch table
1205 *
1206 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
1207 */
1208
1209#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
1210
1211ENTRY(fast_syscall_spill_registers)
1212
1213	/* Register a FIXUP handler (pass current wb as a parameter) */
1214
1215	xsr	a3, excsave1
1216	movi	a0, fast_syscall_spill_registers_fixup
1217	s32i	a0, a3, EXC_TABLE_FIXUP
1218	rsr	a0, windowbase
1219	s32i	a0, a3, EXC_TABLE_PARAM
1220	xsr	a3, excsave1		# restore a3 and excsave_1
1221
1222	/* Save a3, a4 and SAR on stack. */
1223
1224	rsr	a0, sar
1225	s32i	a3, a2, PT_AREG3
1226	s32i	a0, a2, PT_SAR
1227
1228	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
1229
1230	s32i	a4, a2, PT_AREG4
1231	s32i	a7, a2, PT_AREG7
1232	s32i	a8, a2, PT_AREG8
1233	s32i	a11, a2, PT_AREG11
1234	s32i	a12, a2, PT_AREG12
1235	s32i	a15, a2, PT_AREG15
1236
1237	/*
1238	 * Rotate ws so that the current windowbase is at bit 0.
1239	 * Assume ws = xxxwww1yy (www1 current window frame).
1240	 * Rotate ws right so that a4 = yyxxxwww1.
1241	 */
1242
1243	rsr	a0, windowbase
1244	rsr	a3, windowstart		# a3 = xxxwww1yy
1245	ssr	a0			# holds WB
1246	slli	a0, a3, WSBITS
1247	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
1248	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
1249
1250	/* We are done if there are no frames other than the current one. */
1251
1252	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
1253	movi	a0, (1 << (WSBITS-1))
1254	_beqz	a3, .Lnospill		# only one active frame? jump
1255
1256	/* We want 1 at the top, so that we return to the current windowbase */
1257
1258	or	a3, a3, a0		# 1yyxxxwww
1259
1260	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1261
1262	wsr	a3, windowstart		# save shifted windowstart
1263	neg	a0, a3
1264	and	a3, a0, a3		# first bit set from right: 000010000
1265
1266	ffs_ws	a0, a3			# a0: shifts to skip empty frames
1267	movi	a3, WSBITS
1268	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
1269	ssr	a0			# save in SAR for later.
1270
1271	rsr	a3, windowbase
1272	add	a3, a3, a0
1273	wsr	a3, windowbase
1274	rsync
1275
1276	rsr	a3, windowstart
1277	srl	a3, a3			# shift windowstart
1278
1279	/* WB is now just one frame below the oldest frame in the register
1280	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
1281	   and WS differ by one 4-register frame. */
1282
1283	/* Save frames. Depending on what call was used (call4, call8, call12),
1284	 * we have to save 4, 8, or 12 registers.
1285	 */
1286
1287
1288.Lloop: _bbsi.l	a3, 1, .Lc4
1289	_bbci.l	a3, 2, .Lc12
1290
1291.Lc8:	s32e	a4, a13, -16
1292	l32e	a4, a5, -12
1293	s32e	a8, a4, -32
1294	s32e	a5, a13, -12
1295	s32e	a6, a13, -8
1296	s32e	a7, a13, -4
1297	s32e	a9, a4, -28
1298	s32e	a10, a4, -24
1299	s32e	a11, a4, -20
1300	srli	a11, a3, 2		# shift windowstart by 2
1301	rotw	2
1302	_bnei	a3, 1, .Lloop
1303	j	.Lexit
1304
1305.Lc4:	s32e	a4, a9, -16
1306	s32e	a5, a9, -12
1307	s32e	a6, a9, -8
1308	s32e	a7, a9, -4
1309
1310	srli	a7, a3, 1
1311	rotw	1
1312	_bnei	a3, 1, .Lloop
1313	j	.Lexit
1314
1315.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 2 shouldn't be zero!
1316
1317	/* 12-register frame (call12) */
1318
1319	l32e	a0, a5, -12
1320	s32e	a8, a0, -48
1321	mov	a8, a0
1322
1323	s32e	a9, a8, -44
1324	s32e	a10, a8, -40
1325	s32e	a11, a8, -36
1326	s32e	a12, a8, -32
1327	s32e	a13, a8, -28
1328	s32e	a14, a8, -24
1329	s32e	a15, a8, -20
1330	srli	a15, a3, 3
1331
1332	 * window, grab the stack pointer, and rotate back.
1333	 * window, grab the stackpointer, and rotate back.
1334	 * Alternatively, we could also use the following approach, but that
1335	 * makes the fixup routine much more complicated:
1336	 * rotw	1
1337	 * s32e	a0, a13, -16
1338	 * ...
1339	 * rotw 2
1340	 */
1341
1342	rotw	1
1343	mov	a4, a13
1344	rotw	-1
1345
1346	s32e	a4, a8, -16
1347	s32e	a5, a8, -12
1348	s32e	a6, a8, -8
1349	s32e	a7, a8, -4
1350
1351	rotw	3
1352
1353	_beqi	a3, 1, .Lexit
1354	j	.Lloop
1355
1356.Lexit:
1357
1358	/* Done. Do the final rotation and set WS */
1359
1360	rotw	1
1361	rsr	a3, windowbase
1362	ssl	a3
1363	movi	a3, 1
1364	sll	a3, a3
1365	wsr	a3, windowstart
1366.Lnospill:
1367
1368	/* Advance PC, restore registers and SAR, and return from exception. */
1369
1370	l32i	a3, a2, PT_SAR
1371	l32i	a0, a2, PT_AREG0
1372	wsr	a3, sar
1373	l32i	a3, a2, PT_AREG3
1374
1375	/* Restore clobbered registers. */
1376
1377	l32i	a4, a2, PT_AREG4
1378	l32i	a7, a2, PT_AREG7
1379	l32i	a8, a2, PT_AREG8
1380	l32i	a11, a2, PT_AREG11
1381	l32i	a12, a2, PT_AREG12
1382	l32i	a15, a2, PT_AREG15
1383
1384	movi	a2, 0
1385	rfe
1386
1387.Linvalid_mask:
1388
1389	/* We get here because of an unrecoverable error in the window
1390	 * registers, so set up a dummy frame and kill the user application.
1391	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1392	 */
1393
1394	movi	a0, 1
1395	movi	a1, 0
1396
1397	wsr	a0, windowstart
1398	wsr	a1, windowbase
1399	rsync
1400
1401	movi	a0, 0
1402
1403	rsr	a3, excsave1
1404	l32i	a1, a3, EXC_TABLE_KSTK
1405
1406	movi	a4, PS_WOE_MASK | LOCKLEVEL
1407	wsr	a4, ps
1408	rsync
1409
1410	movi	a6, SIGSEGV
1411	call4	do_exit
1412
1413	/* shouldn't return, so panic */
1414
1415	wsr	a0, excsave1
1416	call0	unrecoverable_exception		# should not return
14171:	j	1b
1418
1419
1420ENDPROC(fast_syscall_spill_registers)
1421
1422/* Fixup handler.
1423 *
1424 * We get here if the spill routine causes an exception, e.g. tlb miss.
1425 * We basically restore WINDOWBASE and WINDOWSTART to their state when
1426 * we entered the spill routine and jump to the user exception handler.
1427 *
1428 * Note that we only need to restore the bits in windowstart that have not
1429 * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
1430 * rotated windowstart with only those bits set for frames that haven't been
1431 * spilled yet. Because a3 is rotated such that bit 0 represents the register
1432 * frame for the current windowbase - 1, we need to rotate a3 left by the
1433 * value of the current windowbase + 1 and move it to windowstart.
1434 *
1435 * a0: value of depc, original value in depc
1436 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1437 * a3: exctable, original value in excsave1
1438 */
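/* In C terms (sketch; rotl_ws() is a hypothetical rotate-left within a
 * WSBITS-wide field):
 *
 *	windowstart = rotl_ws((spill_mask << 1) | 1, windowbase);
 */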
1439
1440ENTRY(fast_syscall_spill_registers_fixup)
1441
1442	rsr	a2, windowbase	# get current windowbase (a2 is saved)
1443	xsr	a0, depc	# restore depc and a0
1444	ssl	a2		# set shift (32 - WB)
1445
1446	/* We need to make sure the current registers (a0-a3) are preserved.
1447	 * To do this, we simply set the bit for the current window frame
1448	 * in WS, so that the exception handlers save them to the task stack.
1449	 *
1450	 * Note: we use a3 to set the windowbase, so we take special care
1451	 * of it, saving it in the original _spill_registers frame across
1452	 * the exception handler call.
1453	 */
1454
1455	xsr	a3, excsave1	# get spill-mask
1456	slli	a3, a3, 1	# shift left by one
1457	addi	a3, a3, 1	# set the bit for the current window frame
1458
1459	slli	a2, a3, 32-WSBITS
1460	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
1461	wsr	a2, windowstart	# set corrected windowstart
1462
1463	srli	a3, a3, 1
1464	rsr	a2, excsave1
1465	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
1466	xsr	a2, excsave1
1467	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
1468	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
1469	xsr	a2, excsave1
1470
1471	/* Return to the original (user task) WINDOWBASE.
1472	 * We leave the following frame behind:
1473	 * a0, a1, a2	same
1474	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1475	 * depc:	depc (we have to return to that address)
1476	 * excsave_1:	exctable
1477	 */
1478
1479	wsr	a3, windowbase
1480	rsync
1481
1482	/* We are now in the original frame when we entered _spill_registers:
1483	 *  a0: return address
1484	 *  a1: used, stack pointer
1485	 *  a2: kernel stack pointer
1486	 *  a3: available
1487	 *  depc: exception address
1488	 *  excsave: exctable
1489	 * Note: This frame might be the same as above.
1490	 */
1491
1492	/* Setup stack pointer. */
1493
1494	addi	a2, a2, -PT_USER_SIZE
1495	s32i	a0, a2, PT_AREG0
1496
1497	/* Make sure we return to this fixup handler. */
1498
1499	movi	a3, fast_syscall_spill_registers_fixup_return
1500	s32i	a3, a2, PT_DEPC		# setup depc
1501
1502	/* Jump to the exception handler. */
1503
1504	rsr	a3, excsave1
1505	rsr	a0, exccause
1506	addx4	a0, a0, a3              	# find entry in table
1507	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
1508	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
1509	jx	a0
1510
1511ENDPROC(fast_syscall_spill_registers_fixup)
1512
1513ENTRY(fast_syscall_spill_registers_fixup_return)
1514
1515	/* When we return here, all registers have been restored (a2: DEPC) */
1516
1517	wsr	a2, depc		# exception address
1518
1519	/* Restore fixup handler. */
1520
1521	rsr	a2, excsave1
1522	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
1523	movi	a3, fast_syscall_spill_registers_fixup
1524	s32i	a3, a2, EXC_TABLE_FIXUP
1525	rsr	a3, windowbase
1526	s32i	a3, a2, EXC_TABLE_PARAM
1527	l32i	a2, a2, EXC_TABLE_KSTK
1528
1529	/* Load WB at the time the exception occurred. */
1530
1531	rsr	a3, sar			# WB is still in SAR
1532	neg	a3, a3
1533	wsr	a3, windowbase
1534	rsync
1535
1536	rsr	a3, excsave1
1537	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
1538
1539	rfde
1540
1541ENDPROC(fast_syscall_spill_registers_fixup_return)
1542
1543#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
1544
1545ENTRY(fast_syscall_spill_registers)
1546
1547	l32i    a0, a2, PT_AREG0        # restore a0
1548	movi	a2, -ENOSYS
1549	rfe
1550
1551ENDPROC(fast_syscall_spill_registers)
1552
1553#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
1554
1555#ifdef CONFIG_MMU
1556/*
1557 * We should never get here. Bail out!
1558 */
1559
1560ENTRY(fast_second_level_miss_double_kernel)
1561
15621:
1563	call0	unrecoverable_exception		# should not return
15641:	j	1b
1565
1566ENDPROC(fast_second_level_miss_double_kernel)
1567
1568/* First-level entry handler for user, kernel, and double 2nd-level
1569 * TLB miss exceptions.  Note that for now, user and kernel miss
1570 * exceptions share the same entry point and are handled identically.
1571 *
1572 * An old, less-efficient C version of this function used to exist.
1573 * We include it below, interleaved as comments, for reference.
1574 *
1575 * Entry condition:
1576 *
1577 *   a0:	trashed, original value saved on stack (PT_AREG0)
1578 *   a1:	a1
1579 *   a2:	new stack pointer, original in DEPC
1580 *   a3:	a3
1581 *   depc:	a2, original value saved on stack (PT_DEPC)
1582 *   excsave_1:	dispatch table
1583 *
1584 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1585 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1586 */
1587
1588ENTRY(fast_second_level_miss)
1589
1590	/* Save a1 and a3. Note: we don't expect a double exception. */
1591
1592	s32i	a1, a2, PT_AREG1
1593	s32i	a3, a2, PT_AREG3
1594
1595	/* We need to map the page of PTEs for the user task.  Find
1596	 * the pointer to that page.  Also, it's possible for tsk->mm
1597	 * to be NULL while tsk->active_mm is nonzero if we faulted on
1598	 * a vmalloc address.  In that rare case, we must use
1599	 * active_mm instead to avoid a fault in this handler.  See
1600	 *
1601	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1602 *   (or search the Internet for "mm vs. active_mm")
1603	 *
1604	 *	if (!mm)
1605	 *		mm = tsk->active_mm;
1606	 *	pgd = pgd_offset (mm, regs->excvaddr);
1607	 *	pmd = pmd_offset (pgd, regs->excvaddr);
1608	 *	pmdval = *pmd;
1609	 */
1610
1611	GET_CURRENT(a1,a2)
1612	l32i	a0, a1, TASK_MM		# tsk->mm
1613	beqz	a0, 9f
1614
16158:	rsr	a3, excvaddr		# fault address
1616	_PGD_OFFSET(a0, a3, a1)
1617	l32i	a0, a0, 0		# read pmdval
1618	beqz	a0, 2f
1619
1620	/* Read ptevaddr and convert to top of page-table page.
1621	 *
1622	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
1623	 * 	vpnval += DTLB_WAY_PGTABLE;
1624	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1625	 *	write_dtlb_entry (pteval, vpnval);
1626	 *
1627	 * The messy computation for 'pteval' above really simplifies
1628	 * into the following:
1629	 *
1630	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
1631	 *                 | PAGE_DIRECTORY
1632	 */
1633
1634	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
1635	add	a0, a0, a1		# pmdval - PAGE_OFFSET
1636	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
1637	xor	a0, a0, a1
1638
1639	movi	a1, _PAGE_DIRECTORY
1640	or	a0, a0, a1		# ... | PAGE_DIRECTORY
1641
1642	/*
1643	 * We utilize all three wired ways (7-9) to hold pmd translations.
1644	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
1645	 * This allows mapping the three most common regions to three different
1646	 * DTLBs:
1647	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
1648	 *  2   -> way 8	shared libraries (2000.0000)
1649	 *  3   -> way 9	stack (3000.0000)
1650	 */
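	/* The way selection below, in C terms (sketch):
	 *
	 *	region = (excvaddr >> 28) & 3;			# 0,1,2,3
	 *	way = DTLB_WAY_PGD + ((region * 3) >> 2);	# +0,+0,+1,+2
	 */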
1651
1652	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
1653	rsr	a1, ptevaddr
1654	addx2	a3, a3, a3		# ->			0,3,6,9
1655	srli	a1, a1, PAGE_SHIFT
1656	extui	a3, a3, 2, 2		# ->			0,0,1,2
1657	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
1658	addi	a3, a3, DTLB_WAY_PGD
1659	add	a1, a1, a3		# ... + way_number
1660
16613:	wdtlb	a0, a1
1662	dsync
1663
1664	/* Exit critical section. */
1665
16664:	rsr	a3, excsave1
1667	movi	a0, 0
1668	s32i	a0, a3, EXC_TABLE_FIXUP
1669
1670	/* Restore the working registers, and return. */
1671
1672	l32i	a0, a2, PT_AREG0
1673	l32i	a1, a2, PT_AREG1
1674	l32i	a3, a2, PT_AREG3
1675	l32i	a2, a2, PT_DEPC
1676
1677	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1678
1679	/* Restore excsave1 and return. */
1680
1681	rsr	a2, depc
1682	rfe
1683
1684	/* Return from double exception. */
1685
16861:	xsr	a2, depc
1687	esync
1688	rfde
1689
16909:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1691	bnez	a0, 8b
1692
1693	/* Even more unlikely case active_mm == 0.
1694	 * We can get here with NMI in the middle of context_switch that
1695	 * touches vmalloc area.
1696	 */
1697	movi	a0, init_mm
1698	j	8b
1699
1700#if (DCACHE_WAY_SIZE > PAGE_SIZE)
1701
17022:	/* Special case for cache aliasing.
1703	 * We (should) only get here if a clear_user_page, copy_user_page
1704	 * or the aliased cache flush functions got preempted
1705	 * by another task. Re-establish the temporary mapping to the
1706	 * TLBTEMP_BASE areas.
1707	 */
1708
1709	/* We shouldn't be in a double exception */
1710
1711	l32i	a0, a2, PT_DEPC
1712	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
1713
1714	/* Make sure the exception originated in the special functions */
1715
1716	movi	a0, __tlbtemp_mapping_start
1717	rsr	a3, epc1
1718	bltu	a3, a0, 2f
1719	movi	a0, __tlbtemp_mapping_end
1720	bgeu	a3, a0, 2f
1721
1722	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1723
1724	movi	a3, TLBTEMP_BASE_1
1725	rsr	a0, excvaddr
1726	bltu	a0, a3, 2f
1727
1728	addi	a1, a0, -TLBTEMP_SIZE
1729	bgeu	a1, a3, 2f
1730
1731	/* Check if we have to restore an ITLB mapping. */
1732
1733	movi	a1, __tlbtemp_mapping_itlb
1734	rsr	a3, epc1
1735	sub	a3, a3, a1
1736
1737	/* Calculate VPN */
1738
1739	movi	a1, PAGE_MASK
1740	and	a1, a1, a0
1741
1742	/* Jump for ITLB entry */
1743
1744	bgez	a3, 1f
1745
1746	/* We can use up to two TLBTEMP areas, one for src and one for dst. */
1747
1748	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1749	add	a1, a3, a1
1750
1751	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
1752
1753	mov	a0, a6
1754	movnez	a0, a7, a3
1755	j	3b
1756
1757	/* ITLB entry. We only use dst in a6. */
1758
17591:	witlb	a6, a1
1760	isync
1761	j	4b
1762
1763
1764#endif	// DCACHE_WAY_SIZE > PAGE_SIZE
1765
1766
17672:	/* Invalid PGD, default exception handling */
1768
1769	rsr	a1, depc
1770	s32i	a1, a2, PT_AREG2
1771	mov	a1, a2
1772
1773	rsr	a2, ps
1774	bbsi.l	a2, PS_UM_BIT, 1f
1775	call0	_kernel_exception
17761:	call0	_user_exception
1777
1778ENDPROC(fast_second_level_miss)
1779
1780/*
1781 * StoreProhibitedException
1782 *
1783 * Update the pte and the dtlb mapping for this pte.
1784 *
1785 * Entry condition:
1786 *
1787 *   a0:	trashed, original value saved on stack (PT_AREG0)
1788 *   a1:	a1
1789 *   a2:	new stack pointer, original in DEPC
1790 *   a3:	a3
1791 *   depc:	a2, original value saved on stack (PT_DEPC)
1792 *   excsave_1:	dispatch table
1793 *
1794 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1795 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1796 */
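/* In C terms, the fast path below is roughly (sketch):
 *
 *	pte = *ptep;
 *	if (pte is present and writable) {
 *		*ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *		rewrite the DTLB entry for excvaddr;
 *	} else
 *		fall back to the C fault handler;
 */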
1797
1798ENTRY(fast_store_prohibited)
1799
1800	/* Save a1 and a3. */
1801
1802	s32i	a1, a2, PT_AREG1
1803	s32i	a3, a2, PT_AREG3
1804
1805	GET_CURRENT(a1,a2)
1806	l32i	a0, a1, TASK_MM		# tsk->mm
1807	beqz	a0, 9f
1808
18098:	rsr	a1, excvaddr		# fault address
1810	_PGD_OFFSET(a0, a1, a3)
1811	l32i	a0, a0, 0
1812	beqz	a0, 2f
1813
1814	/*
1815	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
1816	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
1817	 */
1818
1819	_PTE_OFFSET(a0, a1, a3)
1820	l32i	a3, a0, 0		# read pteval
1821	movi	a1, _PAGE_CA_INVALID
1822	ball	a3, a1, 2f
1823	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
1824
1825	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
1826	or	a3, a3, a1
1827	rsr	a1, excvaddr
1828	s32i	a3, a0, 0
1829
1830	/* We need to flush the cache if we have page coloring. */
1831#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1832	dhwb	a0, 0
1833#endif
1834	pdtlb	a0, a1
1835	wdtlb	a3, a0
1836
1837	/* Exit critical section. */
1838
1839	movi	a0, 0
1840	rsr	a3, excsave1
1841	s32i	a0, a3, EXC_TABLE_FIXUP
1842
1843	/* Restore the working registers, and return. */
1844
1845	l32i	a3, a2, PT_AREG3
1846	l32i	a1, a2, PT_AREG1
1847	l32i	a0, a2, PT_AREG0
1848	l32i	a2, a2, PT_DEPC
1849
1850	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1851
1852	rsr	a2, depc
1853	rfe
1854
1855	/* Double exception. Restore FIXUP handler and return. */
1856
18571:	xsr	a2, depc
1858	esync
1859	rfde
1860
18619:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1862	j	8b
1863
18642:	/* If there was a problem, handle fault in C */
1865
1866	rsr	a3, depc	# still holds a2
1867	s32i	a3, a2, PT_AREG2
1868	mov	a1, a2
1869
1870	rsr	a2, ps
1871	bbsi.l	a2, PS_UM_BIT, 1f
1872	call0	_kernel_exception
18731:	call0	_user_exception
1874
1875ENDPROC(fast_store_prohibited)
1876
1877#endif /* CONFIG_MMU */
1878
1879	.text
1880/*
1881 * System Calls.
1882 *
1883 * void system_call (struct pt_regs* regs, int exccause)
1884 *                            a2                 a3
1885 */
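/* C sketch of the dispatch below (the syscall number arrives in
 * regs->areg[2]; arguments are taken from the areg slots loaded
 * before the callx4):
 *
 *	nr = regs->areg[2];
 *	regs->syscall = nr;
 *	if (work pending)
 *		nr = do_syscall_trace_enter(regs);
 *	ret = (nr < __NR_syscalls) ? sys_call_table[nr](...) : -ENOSYS;
 *	regs->areg[2] = ret;
 *	if (work pending)
 *		do_syscall_trace_leave(regs);
 */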
1886	.literal_position
1887
1888ENTRY(system_call)
1889
1890	abi_entry_default
1891
1892	/* regs->syscall = regs->areg[2] */
1893
1894	l32i	a7, a2, PT_AREG2
1895	s32i	a7, a2, PT_SYSCALL
1896
1897	GET_THREAD_INFO(a4, a1)
1898	l32i	a3, a4, TI_FLAGS
1899	movi	a4, _TIF_WORK_MASK
1900	and	a3, a3, a4
1901	beqz	a3, 1f
1902
1903	mov	a6, a2
1904	call4	do_syscall_trace_enter
1905	beqz	a6, .Lsyscall_exit
1906	l32i	a7, a2, PT_SYSCALL
1907
19081:
1909	/* syscall = sys_call_table[syscall_nr] */
1910
1911	movi	a4, sys_call_table
1912	movi	a5, __NR_syscalls
1913	movi	a6, -ENOSYS
1914	bgeu	a7, a5, 1f
1915
1916	addx4	a4, a7, a4
1917	l32i	a4, a4, 0
1918
1919	/* Load args: arg0 - arg5 are passed via regs. */
1920
1921	l32i	a6, a2, PT_AREG6
1922	l32i	a7, a2, PT_AREG3
1923	l32i	a8, a2, PT_AREG4
1924	l32i	a9, a2, PT_AREG5
1925	l32i	a10, a2, PT_AREG8
1926	l32i	a11, a2, PT_AREG9
1927
1928	callx4	a4
1929
19301:	/* regs->areg[2] = return_value */
1931
1932	s32i	a6, a2, PT_AREG2
1933	bnez	a3, 1f
1934.Lsyscall_exit:
1935	abi_ret_default
1936
19371:
1938	mov	a6, a2
1939	call4	do_syscall_trace_leave
1940	abi_ret_default
1941
1942ENDPROC(system_call)
1943
1944/*
1945 * Macro to spill live registers on the kernel stack.
1946 *
1947 * Entry condition: ps.woe is set, ps.excm is cleared
1948 * Exit condition: windowstart has a single bit set
1949 * May clobber: a12, a13
1950 */
1951	.macro	spill_registers_kernel
1952
1953#if XCHAL_NUM_AREGS > 16
1954	call12	1f
1955	_j	2f
1956	retw
1957	.align	4
19581:
1959	_entry	a1, 48
1960	addi	a12, a0, 3
1961#if XCHAL_NUM_AREGS > 32
1962	.rept	(XCHAL_NUM_AREGS - 32) / 12
1963	_entry	a1, 48
1964	mov	a12, a0
1965	.endr
1966#endif
1967	_entry	a1, 16
1968#if XCHAL_NUM_AREGS % 12 == 0
1969	mov	a8, a8
1970#elif XCHAL_NUM_AREGS % 12 == 4
1971	mov	a12, a12
1972#elif XCHAL_NUM_AREGS % 12 == 8
1973	mov	a4, a4
1974#endif
1975	retw
19762:
1977#else
1978	mov	a12, a12
1979#endif
1980	.endm
1981
1982/*
1983 * Task switch.
1984 *
1985 * struct task*  _switch_to (struct task* prev, struct task* next)
1986 *         a2                              a2                 a3
1987 */
1988
1989ENTRY(_switch_to)
1990
1991	abi_entry(XTENSA_SPILL_STACK_RESERVE)
1992
1993	mov	a11, a3			# preserve 'next' (a3)
1994
1995	l32i	a4, a2, TASK_THREAD_INFO
1996	l32i	a5, a3, TASK_THREAD_INFO
1997
1998	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1999
2000#if THREAD_RA > 1020 || THREAD_SP > 1020
2001	addi	a10, a2, TASK_THREAD
2002	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
2003	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
2004#else
2005	s32i	a0, a2, THREAD_RA	# save return address
2006	s32i	a1, a2, THREAD_SP	# save stack pointer
2007#endif
2008
2009#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
2010	movi	a6, __stack_chk_guard
2011	l32i	a8, a3, TASK_STACK_CANARY
2012	s32i	a8, a6, 0
2013#endif
2014
2015	/* Disable interrupts while we manipulate the stack pointer. */
2016
2017	irq_save a14, a3
2018	rsync
2019
2020	/* Switch CPENABLE */
2021
2022#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
2023	l32i	a3, a5, THREAD_CPENABLE
2024	xsr	a3, cpenable
2025	s32i	a3, a4, THREAD_CPENABLE
2026#endif
2027
2028#if XCHAL_HAVE_EXCLUSIVE
2029	l32i	a3, a5, THREAD_ATOMCTL8
2030	getex	a3
2031	s32i	a3, a4, THREAD_ATOMCTL8
2032#endif
2033
2034	/* Flush register file. */
2035
2036	spill_registers_kernel
2037
2038	/* Set kernel stack (and leave critical section)
2039	 * Note: It's safe to set it here. The stack will not be overwritten
2040	 *       because the kernel stack will only be loaded again after
2041	 *       we return from kernel space.
2042	 */
2043
2044	rsr	a3, excsave1		# exc_table
2045	addi	a7, a5, PT_REGS_OFFSET
2046	s32i	a7, a3, EXC_TABLE_KSTK
2047
2048	/* restore context of the task 'next' */
2049
2050	l32i	a0, a11, THREAD_RA	# restore return address
2051	l32i	a1, a11, THREAD_SP	# restore stack pointer
2052
2053	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
2054
2055	wsr	a14, ps
2056	rsync
2057
2058	abi_ret(XTENSA_SPILL_STACK_RESERVE)
2059
2060ENDPROC(_switch_to)
2061
2062ENTRY(ret_from_fork)
2063
2064	/* void schedule_tail (struct task_struct *prev)
2065	 * Note: prev is still in a6 (return value from fake call4 frame)
2066	 */
2067	call4	schedule_tail
2068
2069	mov	a6, a1
2070	call4	do_syscall_trace_leave
2071
2072	j	common_exception_return
2073
2074ENDPROC(ret_from_fork)
2075
2076/*
2077 * Kernel thread creation helper
2078 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
2079 *           left from _switch_to: a6 = prev
2080 */
2081ENTRY(ret_from_kernel_thread)
2082
2083	call4	schedule_tail
2084	mov	a6, a3
2085	callx4	a2
2086	j	common_exception_return
2087
2088ENDPROC(ret_from_kernel_thread)
2089