/* xref: /openbmc/linux/arch/alpha/kernel/entry.S (revision e20d5a22) */
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * arch/alpha/kernel/entry.S
4 *
5 * Kernel entry-points.
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/thread_info.h>
10#include <asm/pal.h>
11#include <asm/errno.h>
12#include <asm/unistd.h>
13
14	.text
15	.set noat
16	.cfi_sections	.debug_frame
17
18/* Stack offsets.  */
19#define SP_OFF			184
20#define SWITCH_STACK_SIZE	320
21
/*
 * CFI_START_OSF_FRAME: open an exception entry point whose initial
 * 48-byte frame was pushed by PALcode before the kernel got control.
 * The CFI annotations describe that frame to the unwinder: column 64
 * is a virtual return-address column holding the interrupted PC
 * (frame slot 8); $gp and the PAL-saved a0-a2 ($16-$18) sit at
 * offsets 16..40.
 */
22.macro	CFI_START_OSF_FRAME	func
23	.align	4
24	.globl	\func
25	.type	\func,@function
26\func:
27	.cfi_startproc simple
28	.cfi_return_column 64
29	.cfi_def_cfa	$sp, 48
30	.cfi_rel_offset	64, 8
31	.cfi_rel_offset	$gp, 16
32	.cfi_rel_offset	$16, 24
33	.cfi_rel_offset	$17, 32
34	.cfi_rel_offset	$18, 40
35.endm
36
/* CFI_END_OSF_FRAME: close the unwind region and set the symbol size.  */
37.macro	CFI_END_OSF_FRAME	func
38	.cfi_endproc
39	.size	\func, . - \func
40.endm
41
42/*
43 * This defines the normal kernel pt-regs layout.
44 *
45 * regs 9-15 preserved by C code
46 * regs 16-18 saved by PAL-code
47 * regs 29-30 saved and set up by PAL-code
48 * JRP - Save regs 16-18 in a special area of the stack, so that
49 * the palcode-provided values are available to the signal handler.
50 */
51
/*
 * SAVE_ALL: extend the PAL frame by SP_OFF bytes and spill the
 * scalar registers PALcode did not save, building the kernel
 * pt-regs layout described above.  The alpha_mv HAE-cache load is
 * interleaved with the stores; the cached HAE value lands in slot
 * 152 so RESTORE_ALL can compare and restore it on exit.
 */
52.macro	SAVE_ALL
53	subq	$sp, SP_OFF, $sp
54	.cfi_adjust_cfa_offset	SP_OFF
55	stq	$0, 0($sp)
56	stq	$1, 8($sp)
57	stq	$2, 16($sp)
58	stq	$3, 24($sp)
59	stq	$4, 32($sp)
60	stq	$28, 144($sp)
61	.cfi_rel_offset	$0, 0
62	.cfi_rel_offset $1, 8
63	.cfi_rel_offset	$2, 16
64	.cfi_rel_offset	$3, 24
65	.cfi_rel_offset	$4, 32
66	.cfi_rel_offset	$28, 144
	/* $2 has been saved; reuse it to fetch the machine vector's HAE cache.  */
67	lda	$2, alpha_mv
68	stq	$5, 40($sp)
69	stq	$6, 48($sp)
70	stq	$7, 56($sp)
71	stq	$8, 64($sp)
72	stq	$19, 72($sp)
73	stq	$20, 80($sp)
74	stq	$21, 88($sp)
75	ldq	$2, HAE_CACHE($2)
76	stq	$22, 96($sp)
77	stq	$23, 104($sp)
78	stq	$24, 112($sp)
79	stq	$25, 120($sp)
80	stq	$26, 128($sp)
81	stq	$27, 136($sp)
	/* Slot 152 records the HAE value that was cached at entry time.  */
82	stq	$2, 152($sp)
	/* Copy the PAL-saved a0-a2 into pt-regs (see the JRP note above).  */
83	stq	$16, 160($sp)
84	stq	$17, 168($sp)
85	stq	$18, 176($sp)
86	.cfi_rel_offset	$5, 40
87	.cfi_rel_offset	$6, 48
88	.cfi_rel_offset	$7, 56
89	.cfi_rel_offset	$8, 64
90	.cfi_rel_offset $19, 72
91	.cfi_rel_offset	$20, 80
92	.cfi_rel_offset	$21, 88
93	.cfi_rel_offset $22, 96
94	.cfi_rel_offset	$23, 104
95	.cfi_rel_offset	$24, 112
96	.cfi_rel_offset	$25, 120
97	.cfi_rel_offset	$26, 128
98	.cfi_rel_offset	$27, 136
99.endm
100
/*
 * RESTORE_ALL: reload the registers spilled by SAVE_ALL and pop the
 * SP_OFF frame.  If the HAE recorded at entry (slot 152) differs
 * from the current alpha_mv HAE cache, write the entry-time value
 * back to both the cache and the hardware HAE register so the
 * interrupted context resumes with its own HAE.
 */
101.macro	RESTORE_ALL
102	lda	$19, alpha_mv
103	ldq	$0, 0($sp)
104	ldq	$1, 8($sp)
105	ldq	$2, 16($sp)
106	ldq	$3, 24($sp)
	/* $21 = HAE saved at entry, $20 = HAE currently cached.  */
107	ldq	$21, 152($sp)
108	ldq	$20, HAE_CACHE($19)
109	ldq	$4, 32($sp)
110	ldq	$5, 40($sp)
111	ldq	$6, 48($sp)
112	ldq	$7, 56($sp)
113	subq	$20, $21, $20
114	ldq	$8, 64($sp)
	/* Skip the HAE writeback when nothing changed while in the kernel.  */
115	beq	$20, 99f
116	ldq	$20, HAE_REG($19)
117	stq	$21, HAE_CACHE($19)
118	stq	$21, 0($20)
11999:	ldq	$19, 72($sp)
120	ldq	$20, 80($sp)
121	ldq	$21, 88($sp)
122	ldq	$22, 96($sp)
123	ldq	$23, 104($sp)
124	ldq	$24, 112($sp)
125	ldq	$25, 120($sp)
126	ldq	$26, 128($sp)
127	ldq	$27, 136($sp)
128	ldq	$28, 144($sp)
129	addq	$sp, SP_OFF, $sp
130	.cfi_restore	$0
131	.cfi_restore	$1
132	.cfi_restore	$2
133	.cfi_restore	$3
134	.cfi_restore	$4
135	.cfi_restore	$5
136	.cfi_restore	$6
137	.cfi_restore	$7
138	.cfi_restore	$8
139	.cfi_restore	$19
140	.cfi_restore	$20
141	.cfi_restore	$21
142	.cfi_restore	$22
143	.cfi_restore	$23
144	.cfi_restore	$24
145	.cfi_restore	$25
146	.cfi_restore	$26
147	.cfi_restore	$27
148	.cfi_restore	$28
149	.cfi_adjust_cfa_offset	-SP_OFF
150.endm
151
/*
 * DO_SWITCH_STACK: push the switch-stack (callee-saved integer and FP
 * state) via the do_switch_stack helper, which links through $1, and
 * record the integer callee-saved slots for the unwinder.
 */
152.macro	DO_SWITCH_STACK
153	bsr	$1, do_switch_stack
154	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
155	.cfi_rel_offset	$9, 0
156	.cfi_rel_offset	$10, 8
157	.cfi_rel_offset	$11, 16
158	.cfi_rel_offset	$12, 24
159	.cfi_rel_offset	$13, 32
160	.cfi_rel_offset	$14, 40
161	.cfi_rel_offset	$15, 48
162	/* We don't really care about the FP registers for debugging.  */
163.endm
164
/*
 * UNDO_SWITCH_STACK: pop the switch-stack via undo_switch_stack
 * (also linked through $1) and retire the CFI added by
 * DO_SWITCH_STACK.
 */
165.macro	UNDO_SWITCH_STACK
166	bsr	$1, undo_switch_stack
167	.cfi_restore	$9
168	.cfi_restore	$10
169	.cfi_restore	$11
170	.cfi_restore	$12
171	.cfi_restore	$13
172	.cfi_restore	$14
173	.cfi_restore	$15
174	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
175.endm
176
177/*
178 * Non-syscall kernel entry points.
179 */
180
/*
 * entInt: interrupt entry.  Build pt-regs, derive the thread_info
 * pointer in $8 (sp with the low 14 bits cleared -- 16 KB kernel
 * stack), pass pt-regs in $19, and tail-call do_entInt with $26
 * preset so it "returns" straight into ret_from_sys_call
 * (jsr $31 discards the real return address).
 */
181CFI_START_OSF_FRAME entInt
182	SAVE_ALL
183	lda	$8, 0x3fff
184	lda	$26, ret_from_sys_call
185	bic	$sp, $8, $8
186	mov	$sp, $19
187	jsr	$31, do_entInt
188CFI_END_OSF_FRAME entInt
189
/*
 * entArith: arithmetic trap entry.  Same shape as entInt, but
 * pt-regs goes in $18; the PAL-saved a0/a1 in $16/$17 are forwarded
 * untouched as the first two do_entArith arguments (presumably the
 * exception summary and register mask -- confirm against do_entArith).
 */
190CFI_START_OSF_FRAME entArith
191	SAVE_ALL
192	lda	$8, 0x3fff
193	lda	$26, ret_from_sys_call
194	bic	$sp, $8, $8
195	mov	$sp, $18
196	jsr	$31, do_entArith
197CFI_END_OSF_FRAME entArith
198
/*
 * entMM: memory-management fault entry.  Builds pt-regs, then pushes
 * an extra 56-byte area for $9-$15 so the kernel exception-fixup
 * code can modify those registers; $19 points at pt-regs (just above
 * that area).  The PAL-saved a0-a2 in $16-$18 are passed through to
 * do_page_fault unchanged.
 */
199CFI_START_OSF_FRAME entMM
200	SAVE_ALL
201/* save $9 - $15 so the inline exception code can manipulate them.  */
202	subq	$sp, 56, $sp
203	.cfi_adjust_cfa_offset	56
204	stq	$9, 0($sp)
205	stq	$10, 8($sp)
206	stq	$11, 16($sp)
207	stq	$12, 24($sp)
208	stq	$13, 32($sp)
209	stq	$14, 40($sp)
210	stq	$15, 48($sp)
211	.cfi_rel_offset	$9, 0
212	.cfi_rel_offset	$10, 8
213	.cfi_rel_offset	$11, 16
214	.cfi_rel_offset	$12, 24
215	.cfi_rel_offset	$13, 32
216	.cfi_rel_offset	$14, 40
217	.cfi_rel_offset	$15, 48
218	addq	$sp, 56, $19
219/* handle the fault */
220	lda	$8, 0x3fff
221	bic	$sp, $8, $8
222	jsr	$26, do_page_fault
223/* reload the registers after the exception code played.  */
224	ldq	$9, 0($sp)
225	ldq	$10, 8($sp)
226	ldq	$11, 16($sp)
227	ldq	$12, 24($sp)
228	ldq	$13, 32($sp)
229	ldq	$14, 40($sp)
230	ldq	$15, 48($sp)
231	addq	$sp, 56, $sp
232	.cfi_restore	$9
233	.cfi_restore	$10
234	.cfi_restore	$11
235	.cfi_restore	$12
236	.cfi_restore	$13
237	.cfi_restore	$14
238	.cfi_restore	$15
239	.cfi_adjust_cfa_offset	-56
240/* finish up the syscall as normal.  */
241	br	ret_from_sys_call
242CFI_END_OSF_FRAME entMM
243
/*
 * entIF: instruction-fault entry (same shape as entInt); pt-regs is
 * passed in $17, with the PAL-saved a0 in $16 forwarded untouched.
 */
244CFI_START_OSF_FRAME entIF
245	SAVE_ALL
246	lda	$8, 0x3fff
247	lda	$26, ret_from_sys_call
248	bic	$sp, $8, $8
249	mov	$sp, $17
250	jsr	$31, do_entIF
251CFI_END_OSF_FRAME entIF
252
/*
 * entUna: unaligned-access trap.  Unlike the other entries this does
 * NOT build pt-regs; it pushes a private 256-byte frame holding (in
 * principle) all 31 integer registers so do_entUna can read and
 * write any of them by index.  The PS word of the PAL frame sits at
 * 256($sp) after this push; PS bit 3 (mask 8) set means the fault
 * came from user mode, which diverts to entUnaUser below.
 */
253CFI_START_OSF_FRAME entUna
254	lda	$sp, -256($sp)
255	.cfi_adjust_cfa_offset	256
256	stq	$0, 0($sp)
257	.cfi_rel_offset	$0, 0
	/* Snapshot the CFI state here; entUnaUser restores it on the branch.  */
258	.cfi_remember_state
259	ldq	$0, 256($sp)	/* get PS */
260	stq	$1, 8($sp)
261	stq	$2, 16($sp)
262	stq	$3, 24($sp)
263	and	$0, 8, $0		/* user mode? */
264	stq	$4, 32($sp)
265	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
266	stq	$5, 40($sp)
267	stq	$6, 48($sp)
268	stq	$7, 56($sp)
269	stq	$8, 64($sp)
270	stq	$9, 72($sp)
271	stq	$10, 80($sp)
272	stq	$11, 88($sp)
273	stq	$12, 96($sp)
274	stq	$13, 104($sp)
275	stq	$14, 112($sp)
276	stq	$15, 120($sp)
277	/* 16-18 PAL-saved */
278	stq	$19, 152($sp)
279	stq	$20, 160($sp)
280	stq	$21, 168($sp)
281	stq	$22, 176($sp)
282	stq	$23, 184($sp)
283	stq	$24, 192($sp)
284	stq	$25, 200($sp)
285	stq	$26, 208($sp)
286	stq	$27, 216($sp)
287	stq	$28, 224($sp)
	/* $19 = pointer to this register array, passed to do_entUna.  */
288	mov	$sp, $19
289	stq	$gp, 232($sp)
290	.cfi_rel_offset	$1, 1*8
291	.cfi_rel_offset	$2, 2*8
292	.cfi_rel_offset	$3, 3*8
293	.cfi_rel_offset	$4, 4*8
294	.cfi_rel_offset	$5, 5*8
295	.cfi_rel_offset	$6, 6*8
296	.cfi_rel_offset	$7, 7*8
297	.cfi_rel_offset	$8, 8*8
298	.cfi_rel_offset	$9, 9*8
299	.cfi_rel_offset	$10, 10*8
300	.cfi_rel_offset	$11, 11*8
301	.cfi_rel_offset	$12, 12*8
302	.cfi_rel_offset	$13, 13*8
303	.cfi_rel_offset	$14, 14*8
304	.cfi_rel_offset	$15, 15*8
305	.cfi_rel_offset	$19, 19*8
306	.cfi_rel_offset	$20, 20*8
307	.cfi_rel_offset	$21, 21*8
308	.cfi_rel_offset	$22, 22*8
309	.cfi_rel_offset	$23, 23*8
310	.cfi_rel_offset	$24, 24*8
311	.cfi_rel_offset	$25, 25*8
312	.cfi_rel_offset	$26, 26*8
313	.cfi_rel_offset	$27, 27*8
314	.cfi_rel_offset	$28, 28*8
315	.cfi_rel_offset	$29, 29*8
316	lda	$8, 0x3fff
	/* Slot 31 ($31 is always zero) is stored but never reloaded.  */
317	stq	$31, 248($sp)
318	bic	$sp, $8, $8
319	jsr	$26, do_entUna
	/* Reload everything do_entUna may have modified, then rti.  */
320	ldq	$0, 0($sp)
321	ldq	$1, 8($sp)
322	ldq	$2, 16($sp)
323	ldq	$3, 24($sp)
324	ldq	$4, 32($sp)
325	ldq	$5, 40($sp)
326	ldq	$6, 48($sp)
327	ldq	$7, 56($sp)
328	ldq	$8, 64($sp)
329	ldq	$9, 72($sp)
330	ldq	$10, 80($sp)
331	ldq	$11, 88($sp)
332	ldq	$12, 96($sp)
333	ldq	$13, 104($sp)
334	ldq	$14, 112($sp)
335	ldq	$15, 120($sp)
336	/* 16-18 PAL-saved */
337	ldq	$19, 152($sp)
338	ldq	$20, 160($sp)
339	ldq	$21, 168($sp)
340	ldq	$22, 176($sp)
341	ldq	$23, 184($sp)
342	ldq	$24, 192($sp)
343	ldq	$25, 200($sp)
344	ldq	$26, 208($sp)
345	ldq	$27, 216($sp)
346	ldq	$28, 224($sp)
347	ldq	$gp, 232($sp)
348	lda	$sp, 256($sp)
349	.cfi_restore	$1
350	.cfi_restore	$2
351	.cfi_restore	$3
352	.cfi_restore	$4
353	.cfi_restore	$5
354	.cfi_restore	$6
355	.cfi_restore	$7
356	.cfi_restore	$8
357	.cfi_restore	$9
358	.cfi_restore	$10
359	.cfi_restore	$11
360	.cfi_restore	$12
361	.cfi_restore	$13
362	.cfi_restore	$14
363	.cfi_restore	$15
364	.cfi_restore	$19
365	.cfi_restore	$20
366	.cfi_restore	$21
367	.cfi_restore	$22
368	.cfi_restore	$23
369	.cfi_restore	$24
370	.cfi_restore	$25
371	.cfi_restore	$26
372	.cfi_restore	$27
373	.cfi_restore	$28
374	.cfi_restore	$29
375	.cfi_adjust_cfa_offset	-256
376	call_pal PAL_rti
377
/*
 * entUnaUser: user-mode unaligned fault.  Undo entUna's private
 * frame (only $0 was stored before the branch), then build a normal
 * pt-regs frame plus the same $9-$15 area used by entMM, and call
 * do_entUnaUser with $19 pointing at pt-regs.  Returns through the
 * common syscall-exit path.
 */
378	.align	4
379entUnaUser:
380	.cfi_restore_state
381	ldq	$0, 0($sp)	/* restore original $0 */
382	lda	$sp, 256($sp)	/* pop entUna's stack frame */
383	.cfi_restore	$0
384	.cfi_adjust_cfa_offset	-256
385	SAVE_ALL		/* setup normal kernel stack */
386	lda	$sp, -56($sp)
387	.cfi_adjust_cfa_offset	56
388	stq	$9, 0($sp)
389	stq	$10, 8($sp)
390	stq	$11, 16($sp)
391	stq	$12, 24($sp)
392	stq	$13, 32($sp)
393	stq	$14, 40($sp)
394	stq	$15, 48($sp)
395	.cfi_rel_offset	$9, 0
396	.cfi_rel_offset	$10, 8
397	.cfi_rel_offset	$11, 16
398	.cfi_rel_offset	$12, 24
399	.cfi_rel_offset	$13, 32
400	.cfi_rel_offset	$14, 40
401	.cfi_rel_offset	$15, 48
402	lda	$8, 0x3fff
403	addq	$sp, 56, $19
404	bic	$sp, $8, $8
405	jsr	$26, do_entUnaUser
406	ldq	$9, 0($sp)
407	ldq	$10, 8($sp)
408	ldq	$11, 16($sp)
409	ldq	$12, 24($sp)
410	ldq	$13, 32($sp)
411	ldq	$14, 40($sp)
412	ldq	$15, 48($sp)
413	lda	$sp, 56($sp)
414	.cfi_restore	$9
415	.cfi_restore	$10
416	.cfi_restore	$11
417	.cfi_restore	$12
418	.cfi_restore	$13
419	.cfi_restore	$14
420	.cfi_restore	$15
421	.cfi_adjust_cfa_offset	-56
422	br	ret_from_sys_call
423CFI_END_OSF_FRAME entUna
424
/*
 * entDbg: debug/breakpoint entry (same shape as entInt); pt-regs is
 * passed in $16 as the sole visible argument to do_entDbg.
 */
425CFI_START_OSF_FRAME entDbg
426	SAVE_ALL
427	lda	$8, 0x3fff
428	lda	$26, ret_from_sys_call
429	bic	$sp, $8, $8
430	mov	$sp, $16
431	jsr	$31, do_entDbg
432CFI_END_OSF_FRAME entDbg
433
434/*
435 * The system call entry point is special.  Most importantly, it looks
436 * like a function call to userspace as far as clobbered registers.  We
437 * do preserve the argument registers (for syscall restarts) and $26
438 * (for leaf syscall functions).
439 *
440 * So much for theory.  We don't take advantage of this yet.
441 *
442 * Note that a0-a2 are not saved by PALcode as with the other entry points.
443 */
444
/*
 * entSys: system-call entry.  Opens its CFI frame by hand rather
 * than with CFI_START_OSF_FRAME because, as noted above, PALcode
 * does not save a0-a2 here -- only the PC (column 64) and $gp slots
 * are valid at entry.  Dispatch: $0 holds the syscall number; it is
 * bounds-checked against NR_SYSCALLS ($4 = number-in-range flag) and
 * out-of-range calls fall through to the preloaded sys_ni_syscall.
 * a0-a2 are saved into the PAL-frame slots (SP_OFF+24..40) so a
 * restarted syscall sees its original arguments.
 */
445	.align	4
446	.globl	entSys
447	.type	entSys, @function
448	.cfi_startproc simple
449	.cfi_return_column 64
450	.cfi_def_cfa	$sp, 48
451	.cfi_rel_offset	64, 8
452	.cfi_rel_offset	$gp, 16
453entSys:
454	SAVE_ALL
455	lda	$8, 0x3fff
456	bic	$sp, $8, $8
457	lda	$4, NR_SYSCALLS($31)
458	stq	$16, SP_OFF+24($sp)
459	lda	$5, sys_call_table
460	lda	$27, sys_ni_syscall
461	cmpult	$0, $4, $4
462	ldl	$3, TI_FLAGS($8)
463	stq	$17, SP_OFF+32($sp)
464	s8addq	$0, $5, $5
465	stq	$18, SP_OFF+40($sp)
466	.cfi_rel_offset	$16, SP_OFF+24
467	.cfi_rel_offset	$17, SP_OFF+32
468	.cfi_rel_offset	$18, SP_OFF+40
	/* Divert to the strace path when tracing (or auditing) is enabled.  */
469#ifdef CONFIG_AUDITSYSCALL
470	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
471	and     $3, $6, $3
472	bne     $3, strace
473#else
474	blbs    $3, strace		/* check for SYSCALL_TRACE in disguise */
475#endif
476	beq	$4, 1f
477	ldq	$27, 0($5)
	/* Indirect call; the sys_ni_syscall operand is only a branch hint.  */
4781:	jsr	$26, ($27), sys_ni_syscall
479	ldgp	$gp, 0($26)
480	blt	$0, $syscall_error	/* the call failed */
481$ret_success:
482	stq	$0, 0($sp)
483	stq	$31, 72($sp)		/* a3=0 => no error */
484
/*
 * ret_from_sys_call: common exit path for syscalls and exceptions.
 * $26 != 0 here means "not a restartable error return", so $18 (the
 * old syscall number) is cleared.  The saved PS at SP_OFF($sp) is
 * tested (bit 3) to decide between the user and kernel return paths;
 * both raise IPL to 7 via PAL_swpipl so TI_FLAGS cannot change
 * between sampling and the rti.
 */
485	.align	4
486	.globl	ret_from_sys_call
487ret_from_sys_call:
488	cmovne	$26, 0, $18		/* $18 = 0 => non-restartable */
489	ldq	$0, SP_OFF($sp)
490	and	$0, 8, $0
491	beq	$0, ret_to_kernel
492ret_to_user:
493	/* Make sure need_resched and sigpending don't change between
494		sampling and the rti.  */
495	lda	$16, 7
496	call_pal PAL_swpipl
497	ldl	$17, TI_FLAGS($8)
498	and	$17, _TIF_WORK_MASK, $2
499	bne	$2, work_pending
500restore_all:
501	.cfi_remember_state
502	RESTORE_ALL
503	call_pal PAL_rti
504
505ret_to_kernel:
506	.cfi_restore_state
507	lda	$16, 7
508	call_pal PAL_swpipl
509	br restore_all
510
511	.align 3
512$syscall_error:
513	/*
514	 * Some system calls (e.g., ptrace) can return arbitrary
515	 * values which might normally be mistaken as error numbers.
516	 * Those functions must zero $0 (v0) directly in the stack
517	 * frame to indicate that a negative return value wasn't an
518	 * error number..
519	 */
520	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
521	beq	$18, $ret_success
522
523	ldq	$19, 72($sp)	/* .. and this a3 */
	/* Negate v0 to a positive errno and set a3=1 to flag the error.  */
524	subq	$31, $0, $0	/* with error in v0 */
525	addq	$31, 1, $1	/* set a3 for errno return */
526	stq	$0, 0($sp)
527	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
528	stq	$1, 72($sp)	/* a3 for return */
529	br	ret_from_sys_call
530
531/*
532 * Do all cleanup when returning from all interrupts and system calls.
533 *
534 * Arguments:
535 *       $8: current.
536 *      $17: TI_FLAGS.
537 *      $18: The old syscall number, or zero if this is not a return
538 *           from a syscall that errored and is possibly restartable.
539 *      $19: The old a3 value
540 */
541
/*
 * work_pending: dispatch pending-work flags on the way back to user
 * mode.  Signal-ish flags (NOTIFY_RESUME/SIGPENDING/NOTIFY_SIGNAL)
 * go to do_work_pending with pt-regs in $16; otherwise just
 * reschedule and retry the exit path.
 */
542	.align	4
543	.type	work_pending, @function
544work_pending:
545	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
546	bne	$2, $work_notifysig

548$work_resched:
549	/*
550	 * We can get here only if we returned from syscall without SIGPENDING
551	 * or got through work_notifysig already.  Either case means no syscall
552	 * restarts for us, so let $18 and $19 burn.
553	 */
554	jsr	$26, schedule
555	mov	0, $18
556	br	ret_to_user

558$work_notifysig:
559	mov	$sp, $16
560	DO_SWITCH_STACK
561	jsr	$26, do_work_pending
562	UNDO_SWITCH_STACK
563	br	restore_all
564
565/*
566 * PTRACE syscall handler
567 */
568
/*
 * strace: traced-syscall path.  syscall_trace_enter may rewrite the
 * syscall number (returned in $0), so the arguments are reloaded --
 * a0-a2 from the PAL-frame slots, a3-a5 from pt-regs -- before the
 * same bounds-checked table dispatch entSys uses.  ret_from_straced
 * is the return address for traced calls; sigreturn_like (below)
 * compares against it to detect this path.
 */
569	.align	4
570	.type	strace, @function
571strace:
572	/* set up signal stack, call syscall_trace */
573	DO_SWITCH_STACK
574	jsr	$26, syscall_trace_enter /* returns the syscall number */
575	UNDO_SWITCH_STACK

577	/* get the arguments back.. */
578	ldq	$16, SP_OFF+24($sp)
579	ldq	$17, SP_OFF+32($sp)
580	ldq	$18, SP_OFF+40($sp)
581	ldq	$19, 72($sp)
582	ldq	$20, 80($sp)
583	ldq	$21, 88($sp)

585	/* get the system call pointer.. */
586	lda	$1, NR_SYSCALLS($31)
587	lda	$2, sys_call_table
588	lda	$27, sys_ni_syscall
589	cmpult	$0, $1, $1
590	s8addq	$0, $2, $2
591	beq	$1, 1f
592	ldq	$27, 0($2)
	/* Indirect call; the sys_gettimeofday operand is only a branch hint.  */
5931:	jsr	$26, ($27), sys_gettimeofday
594ret_from_straced:
595	ldgp	$gp, 0($26)

597	/* check return.. */
598	blt	$0, $strace_error	/* the call failed */
599$strace_success:
600	stq	$31, 72($sp)		/* a3=0 => no error */
601	stq	$0, 0($sp)		/* save return value */

603	DO_SWITCH_STACK
604	jsr	$26, syscall_trace_leave
605	UNDO_SWITCH_STACK
606	br	$31, ret_from_sys_call

608	.align	3
609$strace_error:
610	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
611	beq	$18, $strace_success
612	ldq	$19, 72($sp)	/* .. and this a3 */

	/* Same errno convention as $syscall_error: v0 negated, a3=1.  */
614	subq	$31, $0, $0	/* with error in v0 */
615	addq	$31, 1, $1	/* set a3 for errno return */
616	stq	$0, 0($sp)
617	stq	$1, 72($sp)	/* a3 for return */

	/* Keep $18/$19 alive across syscall_trace_leave in callee-saved regs.  */
619	DO_SWITCH_STACK
620	mov	$18, $9		/* save old syscall number */
621	mov	$19, $10	/* save old a3 */
622	jsr	$26, syscall_trace_leave
623	mov	$9, $18
624	mov	$10, $19
625	UNDO_SWITCH_STACK

627	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
628	br	ret_from_sys_call
629CFI_END_OSF_FRAME entSys
630
631/*
632 * Save and restore the switch stack -- aka the balance of the user context.
633 */
634
/*
 * do_switch_stack: push the SWITCH_STACK_SIZE frame holding the
 * callee-saved integer registers ($9-$15, $26), all FP registers,
 * and the fpcr (stored in the slot that would be $f31).  Linkage is
 * through $1, as the CFI says (.cfi_register 64, $1), and the ret
 * returns through $1 too.
 */
635	.align	4
636	.type	do_switch_stack, @function
637	.cfi_startproc simple
638	.cfi_return_column 64
639	.cfi_def_cfa $sp, 0
640	.cfi_register 64, $1
641do_switch_stack:
642	lda	$sp, -SWITCH_STACK_SIZE($sp)
643	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
644	stq	$9, 0($sp)
645	stq	$10, 8($sp)
646	stq	$11, 16($sp)
647	stq	$12, 24($sp)
648	stq	$13, 32($sp)
649	stq	$14, 40($sp)
650	stq	$15, 48($sp)
651	stq	$26, 56($sp)
652	stt	$f0, 64($sp)
653	stt	$f1, 72($sp)
654	stt	$f2, 80($sp)
655	stt	$f3, 88($sp)
656	stt	$f4, 96($sp)
657	stt	$f5, 104($sp)
658	stt	$f6, 112($sp)
659	stt	$f7, 120($sp)
660	stt	$f8, 128($sp)
661	stt	$f9, 136($sp)
662	stt	$f10, 144($sp)
663	stt	$f11, 152($sp)
664	stt	$f12, 160($sp)
665	stt	$f13, 168($sp)
666	stt	$f14, 176($sp)
667	stt	$f15, 184($sp)
668	stt	$f16, 192($sp)
669	stt	$f17, 200($sp)
670	stt	$f18, 208($sp)
671	stt	$f19, 216($sp)
672	stt	$f20, 224($sp)
673	stt	$f21, 232($sp)
674	stt	$f22, 240($sp)
675	stt	$f23, 248($sp)
676	stt	$f24, 256($sp)
677	stt	$f25, 264($sp)
678	stt	$f26, 272($sp)
679	stt	$f27, 280($sp)
680	mf_fpcr	$f0		# get fpcr
681	stt	$f28, 288($sp)
682	stt	$f29, 296($sp)
683	stt	$f30, 304($sp)
684	stt	$f0, 312($sp)	# save fpcr in slot of $f31
685	ldt	$f0, 64($sp)	# dont let "do_switch_stack" change fp state.
686	ret	$31, ($1), 1
687	.cfi_endproc
688	.size	do_switch_stack, .-do_switch_stack
689
/*
 * undo_switch_stack: pop the frame pushed by do_switch_stack.  $f30
 * does double duty: the saved fpcr (slot 312) is loaded into it and
 * installed with mt_fpcr before $f30's own value is finally reloaded
 * from slot 304.  Returns through $1, like do_switch_stack.
 */
690	.align	4
691	.type	undo_switch_stack, @function
692	.cfi_startproc simple
693	.cfi_def_cfa $sp, 0
694	.cfi_register 64, $1
695undo_switch_stack:
696	ldq	$9, 0($sp)
697	ldq	$10, 8($sp)
698	ldq	$11, 16($sp)
699	ldq	$12, 24($sp)
700	ldq	$13, 32($sp)
701	ldq	$14, 40($sp)
702	ldq	$15, 48($sp)
703	ldq	$26, 56($sp)
704	ldt	$f30, 312($sp)	# get saved fpcr
705	ldt	$f0, 64($sp)
706	ldt	$f1, 72($sp)
707	ldt	$f2, 80($sp)
708	ldt	$f3, 88($sp)
709	mt_fpcr	$f30		# install saved fpcr
710	ldt	$f4, 96($sp)
711	ldt	$f5, 104($sp)
712	ldt	$f6, 112($sp)
713	ldt	$f7, 120($sp)
714	ldt	$f8, 128($sp)
715	ldt	$f9, 136($sp)
716	ldt	$f10, 144($sp)
717	ldt	$f11, 152($sp)
718	ldt	$f12, 160($sp)
719	ldt	$f13, 168($sp)
720	ldt	$f14, 176($sp)
721	ldt	$f15, 184($sp)
722	ldt	$f16, 192($sp)
723	ldt	$f17, 200($sp)
724	ldt	$f18, 208($sp)
725	ldt	$f19, 216($sp)
726	ldt	$f20, 224($sp)
727	ldt	$f21, 232($sp)
728	ldt	$f22, 240($sp)
729	ldt	$f23, 248($sp)
730	ldt	$f24, 256($sp)
731	ldt	$f25, 264($sp)
732	ldt	$f26, 272($sp)
733	ldt	$f27, 280($sp)
734	ldt	$f28, 288($sp)
735	ldt	$f29, 296($sp)
736	ldt	$f30, 304($sp)
737	lda	$sp, SWITCH_STACK_SIZE($sp)
738	ret	$31, ($1), 1
739	.cfi_endproc
740	.size	undo_switch_stack, .-undo_switch_stack
741
742/*
743 * The meat of the context switch code.
744 */
745
/*
 * alpha_switch_to: the context switch.  Save the outgoing task's
 * switch stack, PAL_swpctx to the new hardware context, restore the
 * incoming task's switch stack, recompute $8 (thread_info) from the
 * new $sp, and return $17 in $0 (presumably the previous task
 * pointer expected by switch_to -- confirm against the C caller).
 */
746	.align	4
747	.globl	alpha_switch_to
748	.type	alpha_switch_to, @function
749	.cfi_startproc
750alpha_switch_to:
751	DO_SWITCH_STACK
752	call_pal PAL_swpctx
753	lda	$8, 0x3fff
754	UNDO_SWITCH_STACK
755	bic	$sp, $8, $8
756	mov	$17, $0
757	ret
758	.cfi_endproc
759	.size	alpha_switch_to, .-alpha_switch_to
760
761/*
762 * New processes begin life here.
763 */
764
/*
 * ret_from_fork: first code a new user process runs.  Tail-calls
 * schedule_tail($17) with $26 preset so it returns straight into
 * ret_to_user.
 */
765	.globl	ret_from_fork
766	.align	4
767	.ent	ret_from_fork
768ret_from_fork:
769	lda	$26, ret_to_user
770	mov	$17, $16
771	jmp	$31, schedule_tail
772.end ret_from_fork
773
774/*
775 * ... and new kernel threads - here
776 */
/*
 * ret_from_kernel_thread: first code a new kernel thread runs.
 * After schedule_tail($17), call the thread function held in the
 * callee-saved $9 with its argument from $10, then fall into
 * ret_to_user if it ever returns.
 */
777	.align 4
778	.globl	ret_from_kernel_thread
779	.ent	ret_from_kernel_thread
780ret_from_kernel_thread:
781	mov	$17, $16
782	jsr	$26, schedule_tail
783	mov	$9, $27
784	mov	$10, $16
785	jsr	$26, ($9)
786	br	$31, ret_to_user
787.end ret_from_kernel_thread
788
789
790/*
791 * Special system calls.  Most of these are special in that they either
792 * have to play switch_stack games.
793 */
794
/*
 * fork_like: wrap sys_fork/sys_vfork/sys_clone with a switch stack
 * so the full callee-saved state is on the stack for the child to
 * copy.  On return, only $26 is reloaded before the frame is
 * dropped; NOTE(review): the rest of the switch-stack contents are
 * presumably consumed by sys_\name when building the child -- confirm
 * against copy_thread.
 */
795.macro	fork_like name
796	.align	4
797	.globl	alpha_\name
798	.ent	alpha_\name
799alpha_\name:
800	.prologue 0
801	bsr	$1, do_switch_stack
802	jsr	$26, sys_\name
803	ldq	$26, 56($sp)
804	lda	$sp, SWITCH_STACK_SIZE($sp)
805	ret
806.end	alpha_\name
807.endm

809fork_like fork
810fork_like vfork
811fork_like clone
812
/*
 * sigreturn_like: wrap do_sigreturn/do_rt_sigreturn.  The switch
 * stack space is allocated but not filled -- presumably do_\name
 * populates it from the signal context (confirm against do_sigreturn).
 * $9 = ($26 < ret_from_straced): when the return address is at or
 * beyond ret_from_straced we came via the strace path, so
 * syscall_trace_leave must run too.  undo_switch_stack is entered
 * with br $1 (it returns through $1), then we join the common exit.
 */
813.macro	sigreturn_like name
814	.align	4
815	.globl	sys_\name
816	.ent	sys_\name
817sys_\name:
818	.prologue 0
819	lda	$9, ret_from_straced
820	cmpult	$26, $9, $9
821	lda	$sp, -SWITCH_STACK_SIZE($sp)
822	jsr	$26, do_\name
823	bne	$9, 1f
824	jsr	$26, syscall_trace_leave
8251:	br	$1, undo_switch_stack
826	br	ret_from_sys_call
827.end sys_\name
828.endm

830sigreturn_like sigreturn
831sigreturn_like rt_sigreturn
832
/*
 * alpha_syscall_zero: handler for syscall number 0 -- returns -ENOSYS
 * while keeping the "saved syscall number is zero means success"
 * logic (see comment below) from misfiring.
 */
833	.align	4
834	.globl	alpha_syscall_zero
835	.ent	alpha_syscall_zero
836alpha_syscall_zero:
837	.prologue 0
838	/* Special because it needs to do something opposite to
839	   force_successful_syscall_return().  We use the saved
840	   syscall number for that, zero meaning "not an error".
841	   That works nicely, but for real syscall 0 we need to
842	   make sure that this logics doesn't get confused.
843	   Store a non-zero there - -ENOSYS we need in register
844	   for our return value will do just fine.
845	  */
846	lda	$0, -ENOSYS
847	unop
848	stq	$0, 0($sp)
849	ret
850.end alpha_syscall_zero
851