/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

	.text
	.set noat
	.cfi_sections	.debug_frame

/* Stack offsets.  */
#define SP_OFF			184
#define SWITCH_STACK_SIZE	320

.macro	CFI_START_OSF_FRAME	func
	.align	4
	.globl	\func
	.type	\func,@function
\func:
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
	.cfi_rel_offset	$16, 24
	.cfi_rel_offset	$17, 32
	.cfi_rel_offset	$18, 40
.endm

.macro	CFI_END_OSF_FRAME	func
	.cfi_endproc
	.size	\func, . - \func
.endm

/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */

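/*
 * After SAVE_ALL the frame looks like this (offsets from $sp), with
 * the PALcode-built frame (ps, pc, gp and the trap-time a0-a2) sitting
 * just above it at SP_OFF:
 *
 *	  0.. 64	$0-$8
 *	 72..136	$19-$27
 *	    144		$28
 *	    152		saved HAE
 *	160..176	$16-$18 (copies of the PAL-saved values)
 *	SP_OFF+ 0	ps
 *	SP_OFF+ 8	pc
 *	SP_OFF+16	gp
 *	SP_OFF+24..40	a0-a2
 *
 * This is expected to line up with struct pt_regs.
 */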
.macro	SAVE_ALL
	subq	$sp, SP_OFF, $sp
	.cfi_adjust_cfa_offset	SP_OFF
	stq	$0, 0($sp)
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	stq	$4, 32($sp)
	stq	$28, 144($sp)
	.cfi_rel_offset	$0, 0
	.cfi_rel_offset $1, 8
	.cfi_rel_offset	$2, 16
	.cfi_rel_offset	$3, 24
	.cfi_rel_offset	$4, 32
	.cfi_rel_offset	$28, 144
	lda	$2, alpha_mv
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$19, 72($sp)
	stq	$20, 80($sp)
	stq	$21, 88($sp)
	ldq	$2, HAE_CACHE($2)
	stq	$22, 96($sp)
	stq	$23, 104($sp)
	stq	$24, 112($sp)
	stq	$25, 120($sp)
	stq	$26, 128($sp)
	stq	$27, 136($sp)
	stq	$2, 152($sp)
	stq	$16, 160($sp)
	stq	$17, 168($sp)
	stq	$18, 176($sp)
	.cfi_rel_offset	$5, 40
	.cfi_rel_offset	$6, 48
	.cfi_rel_offset	$7, 56
	.cfi_rel_offset	$8, 64
	.cfi_rel_offset $19, 72
	.cfi_rel_offset	$20, 80
	.cfi_rel_offset	$21, 88
	.cfi_rel_offset $22, 96
	.cfi_rel_offset	$23, 104
	.cfi_rel_offset	$24, 112
	.cfi_rel_offset	$25, 120
	.cfi_rel_offset	$26, 128
	.cfi_rel_offset	$27, 136
.endm

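/*
 * RESTORE_ALL is the inverse of SAVE_ALL.  Before reloading the
 * registers it compares the HAE value saved in the frame with the
 * current HAE_CACHE; if they differ, the saved value is written back
 * to the HAE register and the cache is updated.
 */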
.macro	RESTORE_ALL
	lda	$19, alpha_mv
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$21, 152($sp)
	ldq	$20, HAE_CACHE($19)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	subq	$20, $21, $20
	ldq	$8, 64($sp)
	beq	$20, 99f
	ldq	$20, HAE_REG($19)
	stq	$21, HAE_CACHE($19)
	stq	$21, 0($20)
99:	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)
	ldq	$22, 96($sp)
	ldq	$23, 104($sp)
	ldq	$24, 112($sp)
	ldq	$25, 120($sp)
	ldq	$26, 128($sp)
	ldq	$27, 136($sp)
	ldq	$28, 144($sp)
	addq	$sp, SP_OFF, $sp
	.cfi_restore	$0
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_adjust_cfa_offset	-SP_OFF
.endm

.macro	DO_SWITCH_STACK
	bsr	$1, do_switch_stack
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	/* We don't really care about the FP registers for debugging.  */
.endm

.macro	UNDO_SWITCH_STACK
	bsr	$1, undo_switch_stack
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
.endm

/*
 * Non-syscall kernel entry points.
 */

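/*
 * Most of these follow the same pattern: build a pt_regs frame with
 * SAVE_ALL, recover the current thread_info pointer into $8 by
 * masking the stack pointer (kernel stacks are 16KB aligned, hence
 * the 0x3fff mask), preload $26 with ret_from_sys_call, append a
 * pointer to the frame to the arguments PALcode already placed in
 * $16-$18, and tail-call the C handler.
 */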
CFI_START_OSF_FRAME entInt
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $19
	jsr	$31, do_entInt
CFI_END_OSF_FRAME entInt

CFI_START_OSF_FRAME entArith
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $18
	jsr	$31, do_entArith
CFI_END_OSF_FRAME entArith

CFI_START_OSF_FRAME entMM
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 56, $sp
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	addq	$sp, 56, $19
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	jsr	$26, do_page_fault
/* reload the registers after the exception code played.  */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 56, $sp
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
/* finish up the syscall as normal.  */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entMM

CFI_START_OSF_FRAME entIF
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $17
	jsr	$31, do_entIF
CFI_END_OSF_FRAME entIF

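/*
 * entUna builds its own 256-byte frame holding every integer register
 * rather than the usual pt_regs layout, so that the kernel unaligned
 * access fixup in do_entUna can read and rewrite whichever registers
 * the faulting instruction used.  User-mode faults take the normal
 * SAVE_ALL path via entUnaUser instead.
 */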
CFI_START_OSF_FRAME entUna
	lda	$sp, -256($sp)
	.cfi_adjust_cfa_offset	256
	stq	$0, 0($sp)
	.cfi_rel_offset	$0, 0
	.cfi_remember_state
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0		/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	mov	$sp, $19
	stq	$gp, 232($sp)
	.cfi_rel_offset	$1, 1*8
	.cfi_rel_offset	$2, 2*8
	.cfi_rel_offset	$3, 3*8
	.cfi_rel_offset	$4, 4*8
	.cfi_rel_offset	$5, 5*8
	.cfi_rel_offset	$6, 6*8
	.cfi_rel_offset	$7, 7*8
	.cfi_rel_offset	$8, 8*8
	.cfi_rel_offset	$9, 9*8
	.cfi_rel_offset	$10, 10*8
	.cfi_rel_offset	$11, 11*8
	.cfi_rel_offset	$12, 12*8
	.cfi_rel_offset	$13, 13*8
	.cfi_rel_offset	$14, 14*8
	.cfi_rel_offset	$15, 15*8
	.cfi_rel_offset	$19, 19*8
	.cfi_rel_offset	$20, 20*8
	.cfi_rel_offset	$21, 21*8
	.cfi_rel_offset	$22, 22*8
	.cfi_rel_offset	$23, 23*8
	.cfi_rel_offset	$24, 24*8
	.cfi_rel_offset	$25, 25*8
	.cfi_rel_offset	$26, 26*8
	.cfi_rel_offset	$27, 27*8
	.cfi_rel_offset	$28, 28*8
	.cfi_rel_offset	$29, 29*8
	lda	$8, 0x3fff
	stq	$31, 248($sp)
	bic	$sp, $8, $8
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_restore	$29
	.cfi_adjust_cfa_offset	-256
	call_pal PAL_rti

	.align	4
entUnaUser:
	.cfi_restore_state
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	.cfi_restore	$0
	.cfi_adjust_cfa_offset	-256
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -56($sp)
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	lda	$8, 0x3fff
	addq	$sp, 56, $19
	bic	$sp, $8, $8
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 56($sp)
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
	br	ret_from_sys_call
CFI_END_OSF_FRAME entUna

CFI_START_OSF_FRAME entDbg
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $16
	jsr	$31, do_entDbg
CFI_END_OSF_FRAME entDbg

/*
 * The system call entry point is special.  Most importantly, as far
 * as clobbered registers are concerned it looks to userspace like an
 * ordinary function call.  We do preserve the argument registers (for
 * syscall restarts) and $26 (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that, unlike the other entry points, a0-a2 are not saved by
 * PALcode here.
 */

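/*
 * On entry $0 holds the syscall number and $16-$21 the arguments.
 * The number is bounds-checked against NR_SYSCALLS and used to index
 * sys_call_table; out-of-range numbers fall through to
 * sys_ni_syscall.  On return a negative $0 is normally treated as an
 * errno: $0 is negated and a3 ($19) is set to 1, otherwise a3 is
 * cleared.
 */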
	.align	4
	.globl	entSys
	.type	entSys, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
entSys:
	SAVE_ALL
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	lda	$4, NR_SYSCALLS($31)
	stq	$16, SP_OFF+24($sp)
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5
	stq	$18, SP_OFF+40($sp)
	.cfi_rel_offset	$16, SP_OFF+24
	.cfi_rel_offset	$17, SP_OFF+32
	.cfi_rel_offset	$18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
	and     $3, $6, $3
#endif
	bne     $3, strace
	beq	$4, 1f
	ldq	$27, 0($5)
1:	jsr	$26, ($27), alpha_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
	stq	$0, 0($sp)
	stq	$31, 72($sp)		/* a3=0 => no error */

	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	cmovne	$26, 0, $18		/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
		sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending
restore_all:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7
	call_pal PAL_swpipl
	br restore_all

	.align 3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken as error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number..
	 */
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $ret_success

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call

$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)	/* a3=0 => no error */
	br	ret_from_sys_call

/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *       $8: current.
 *      $17: TI_FLAGS.
 *      $18: The old syscall number, or zero if this is not a return
 *           from a syscall that errored and is possibly restartable.
 *      $19: The old a3 value
 */

	.align	4
	.type	work_pending, @function
work_pending:
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
	bne	$2, $work_notifysig

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18
	br	ret_to_user

$work_notifysig:
	mov	$sp, $16
	DO_SWITCH_STACK
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all

/*
 * PTRACE syscall handler
 */

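/*
 * Reached from entSys when TI_FLAGS indicates syscall-entry work
 * (syscall tracing, and syscall auditing when CONFIG_AUDITSYSCALL is
 * enabled).  syscall_trace_enter returns the (possibly rewritten)
 * syscall number, so the arguments are reloaded from the frame before
 * the call is dispatched, and syscall_trace_leave is called on the
 * way out.
 */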
	.align	4
	.type	strace, @function
strace:
	/* set up signal stack, call syscall_trace */
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* get the arguments back.. */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_SYSCALLS($31)
	lda	$2, sys_call_table
	lda	$27, alpha_ni_syscall
	cmpult	$0, $1, $1
	s8addq	$0, $2, $2
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
	stq	$31, 72($sp)		/* a3=0 => no error */
$strace_success:
	stq	$0, 0($sp)		/* save return value */

	DO_SWITCH_STACK
	jsr	$26, syscall_trace_leave
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call

	.align	3
$strace_error:
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $strace_success
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	mov	$18, $9		/* save old syscall number */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys

/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */

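/*
 * The switch stack holds the callee-saved integer registers $9-$15
 * and $26, followed by all 32 floating point register slots, with the
 * FPCR stored in the slot that would otherwise belong to $f31; that
 * accounts for the 320 bytes of SWITCH_STACK_SIZE (8*8 + 32*8).
 */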
	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	stt	$f0, 64($sp)
	stt	$f1, 72($sp)
	stt	$f2, 80($sp)
	stt	$f3, 88($sp)
	stt	$f4, 96($sp)
	stt	$f5, 104($sp)
	stt	$f6, 112($sp)
	stt	$f7, 120($sp)
	stt	$f8, 128($sp)
	stt	$f9, 136($sp)
	stt	$f10, 144($sp)
	stt	$f11, 152($sp)
	stt	$f12, 160($sp)
	stt	$f13, 168($sp)
	stt	$f14, 176($sp)
	stt	$f15, 184($sp)
	stt	$f16, 192($sp)
	stt	$f17, 200($sp)
	stt	$f18, 208($sp)
	stt	$f19, 216($sp)
	stt	$f20, 224($sp)
	stt	$f21, 232($sp)
	stt	$f22, 240($sp)
	stt	$f23, 248($sp)
	stt	$f24, 256($sp)
	stt	$f25, 264($sp)
	stt	$f26, 272($sp)
	stt	$f27, 280($sp)
	mf_fpcr	$f0		# get fpcr
	stt	$f28, 288($sp)
	stt	$f29, 296($sp)
	stt	$f30, 304($sp)
	stt	$f0, 312($sp)	# save fpcr in slot of $f31
	ldt	$f0, 64($sp)	# don't let "do_switch_stack" change fp state.
	ret	$31, ($1), 1
	.cfi_endproc
	.size	do_switch_stack, .-do_switch_stack

	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	ldt	$f30, 312($sp)	# get saved fpcr
	ldt	$f0, 64($sp)
	ldt	$f1, 72($sp)
	ldt	$f2, 80($sp)
	ldt	$f3, 88($sp)
	mt_fpcr	$f30		# install saved fpcr
	ldt	$f4, 96($sp)
	ldt	$f5, 104($sp)
	ldt	$f6, 112($sp)
	ldt	$f7, 120($sp)
	ldt	$f8, 128($sp)
	ldt	$f9, 136($sp)
	ldt	$f10, 144($sp)
	ldt	$f11, 152($sp)
	ldt	$f12, 160($sp)
	ldt	$f13, 168($sp)
	ldt	$f14, 176($sp)
	ldt	$f15, 184($sp)
	ldt	$f16, 192($sp)
	ldt	$f17, 200($sp)
	ldt	$f18, 208($sp)
	ldt	$f19, 216($sp)
	ldt	$f20, 224($sp)
	ldt	$f21, 232($sp)
	ldt	$f22, 240($sp)
	ldt	$f23, 248($sp)
	ldt	$f24, 256($sp)
	ldt	$f25, 264($sp)
	ldt	$f26, 272($sp)
	ldt	$f27, 280($sp)
	ldt	$f28, 288($sp)
	ldt	$f29, 296($sp)
	ldt	$f30, 304($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	undo_switch_stack, .-undo_switch_stack

/*
 * The meat of the context switch code.
 */

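/*
 * $16 carries the new context for PAL_swpctx (the physical address of
 * the next task's PCB) and $17 the previous task pointer.  The
 * callee-saved state goes onto the switch stack, PAL_swpctx switches
 * process contexts, $8 is recomputed for the new kernel stack, and
 * the previous task is returned in $0.
 */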
	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	DO_SWITCH_STACK
	call_pal PAL_swpctx
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK
	bic	$sp, $8, $8
	mov	$17, $0
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to

/*
 * New processes begin life here.
 */

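/*
 * $17 still holds the previous task left there by alpha_switch_to; it
 * is handed to schedule_tail, after which the new child falls into
 * ret_from_sys_call via the preloaded $26.
 */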
	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_from_sys_call
	mov	$17, $16
	jmp	$31, schedule_tail
.end ret_from_fork

/*
 * ... and new kernel threads - here
 */
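/*
 * The thread function arrives in $9 and its argument in $10 (set up
 * by copy_thread).  After schedule_tail they are moved into the
 * procedure value and first argument registers and the function is
 * called.  If it returns, the thread continues to user space through
 * ret_to_user with syscall restarts disabled.
 */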
	.align 4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	mov	$17, $16
	jsr	$26, schedule_tail
	mov	$9, $27
	mov	$10, $16
	jsr	$26, ($9)
	mov	$31, $19		/* to disable syscall restarts */
	br	$31, ret_to_user
.end ret_from_kernel_thread


/*
 * Special system calls.  Most of these are special in that they either
 * have to play switch_stack games or in some way use the pt_regs struct.
 */

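/*
 * fork_like expands to a small wrapper that builds a switch_stack
 * frame before calling sys_fork/sys_vfork/sys_clone, so the fork path
 * sees the parent's callee-saved register state, and then pops the
 * frame again, restoring only $26.
 */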
.macro	fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack
	jsr	$26, sys_\name
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret
.end	alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone

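/*
 * The sigreturn paths return straight to ret_from_sys_call rather
 * than back through the straced exit path, so they must call
 * syscall_trace_leave themselves when they were entered via strace.
 * Comparing the return address in $26 against the ret_from_straced
 * label makes that distinction.
 */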
	.align	4
	.globl	sys_sigreturn
	.ent	sys_sigreturn
sys_sigreturn:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_sigreturn
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
.end sys_sigreturn

	.align	4
	.globl	sys_rt_sigreturn
	.ent	sys_rt_sigreturn
sys_rt_sigreturn:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_rt_sigreturn
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
.end sys_rt_sigreturn

	.align	4
	.globl	alpha_ni_syscall
	.ent	alpha_ni_syscall
alpha_ni_syscall:
	.prologue 0
	/* Special because it also implements overflow handling via
	   syscall number 0.  And if you recall, zero is a special
	   trigger for "not an error".  Store large non-zero there.  */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)
	ret
.end alpha_ni_syscall