xref: /openbmc/linux/arch/sparc/kernel/entry.S (revision c21b37f6)
1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
2 * arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
9 */
10
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/kgdb.h>
17#include <asm/contregs.h>
18#include <asm/ptrace.h>
19#include <asm/asm-offsets.h>
20#include <asm/psr.h>
21#include <asm/vaddrs.h>
22#include <asm/memreg.h>
23#include <asm/page.h>
24#ifdef CONFIG_SUN4
25#include <asm/pgtsun4.h>
26#else
27#include <asm/pgtsun4c.h>
28#endif
29#include <asm/winmacro.h>
30#include <asm/signal.h>
31#include <asm/obio.h>
32#include <asm/mxcc.h>
33#include <asm/thread_info.h>
34#include <asm/param.h>
35#include <asm/unistd.h>
36
37#include <asm/asmmacro.h>
38
/* %g6 permanently holds the current thread_info pointer. */
#define curptr      g6

/* These are just handy. */
#define _SV	save	%sp, -STACKFRAME_SZ, %sp
#define _RS     restore

/* Force all 7 possibly-dirty register windows out to the kernel stack
 * by walking down the window chain with saves and back up with restores.
 */
#define FLUSH_ALL_KERNEL_WINDOWS \
	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
48
/* First, KGDB low level things.  This is a rewrite
 * of the routines found in the sparc-stub.c asm() statement
 * from the gdb distribution.  This is also dual-purpose
 * as a software trap for userlevel programs.
 */
	.data
	.align	4

/* Recursion counter for the (now-dead) kgdb trap handler below. */
in_trap_handler:
	.word	0

	.text
	.align	4
62
#if 0 /* kgdb is dropped from 2.5.33 */
! This function is called when any SPARC trap (except window overflow or
! underflow) occurs.  It makes sure that the invalid register window is still
! available before jumping into C code.  It will also restore the world if you
! return from handle_exception.

	.globl	trap_low
trap_low:
	rd	%wim, %l3
	SAVE_ALL

	! Bump in_trap_handler so nested entries can be detected.
	sethi	%hi(in_trap_handler), %l4
	ld	[%lo(in_trap_handler) + %l4], %l5
	inc	%l5
	st	%l5, [%lo(in_trap_handler) + %l4]

	/* Make sure kgdb sees the same state we just saved. */
	LOAD_PT_GLOBALS(sp)
	LOAD_PT_INS(sp)
	ld	[%sp + STACKFRAME_SZ + PT_Y], %l4
	ld	[%sp + STACKFRAME_SZ + PT_WIM], %l3
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %l0
	ld	[%sp + STACKFRAME_SZ + PT_PC], %l1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l2
	rd	%tbr, %l5	/* Never changes... */

	/* Make kgdb exception frame. */
	sub	%sp,(16+1+6+1+72)*4,%sp	! Make room for input & locals
 					! + hidden arg + arg spill
					! + doubleword alignment
					! + registers[72] local var
	SAVE_KGDB_GLOBALS(sp)
	SAVE_KGDB_INS(sp)
	SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)

	/* We are increasing PIL, so two writes. */
	or	%l0, PSR_PIL, %l0
	wr	%l0, 0, %psr
	WRITE_PAUSE
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	handle_exception
	 add	%sp, STACKFRAME_SZ, %o0	! Pass address of registers

	/* Load new kgdb register set. */
	LOAD_KGDB_GLOBALS(sp)
	LOAD_KGDB_INS(sp)
	LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
	wr      %l4, 0x0, %y

	! Leaving the handler: drop the recursion counter again.
	sethi	%hi(in_trap_handler), %l4
	ld	[%lo(in_trap_handler) + %l4], %l5
	dec	%l5
	st	%l5, [%lo(in_trap_handler) + %l4]

	add	%sp,(16+1+6+1+72)*4,%sp	! Undo the kgdb trap frame.

	/* Now take what kgdb did and place it into the pt_regs
	 * frame which SparcLinux RESTORE_ALL understands.
	 */
	STORE_PT_INS(sp)
	STORE_PT_GLOBALS(sp)
	STORE_PT_YREG(sp, g2)
	STORE_PT_PRIV(sp, l0, l1, l2)

	RESTORE_ALL
#endif
131
#ifdef CONFIG_BLK_DEV_FD
	.text
	.align	4
	.globl	floppy_hardint
/* Fast pseudo-DMA floppy interrupt.  While a transfer is in progress
 * (doing_pdma != 0) bytes are shovelled between the FDC data register
 * and the pdma buffer without building a full trap frame; only on
 * completion/overrun do we fall into floppy_dosoftint and SAVE_ALL.
 */
floppy_hardint:
	/*
	 * This code cannot touch registers %l0 %l1 and %l2
	 * because SAVE_ALL depends on their values. It depends
	 * on %l3 also, but we regenerate it before a call.
	 * Other registers are:
	 * %l3 -- base address of fdc registers
	 * %l4 -- pdma_vaddr
	 * %l5 -- scratch for ld/st address
	 * %l6 -- pdma_size
	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
	 */

	/* Do we have work to do? */
	sethi	%hi(doing_pdma), %l7
	ld	[%l7 + %lo(doing_pdma)], %l7
	cmp	%l7, 0
	be	floppy_dosoftint
	 nop

	/* Load fdc register base */
	sethi	%hi(fdc_status), %l3
	ld	[%l3 + %lo(fdc_status)], %l3

	/* Setup register addresses */
	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
	ld	[%l5 + %lo(pdma_vaddr)], %l4
	sethi	%hi(pdma_size), %l5	! bytes to go
	ld	[%l5 + %lo(pdma_size)], %l6
next_byte:
  	ldub	[%l3], %l7		! read FDC status register

	andcc	%l7, 0x80, %g0		! Does fifo still have data
	bz	floppy_fifo_emptied	! fifo has been emptied...
	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
	bz	floppy_overrun		! nope, overrun
	 andcc	%l7, 0x40, %g0		! 0=write 1=read
	bz	floppy_write
	 sub	%l6, 0x1, %l6		! one byte less to go (delay slot)

	/* Ok, actually read this byte */
	ldub	[%l3 + 1], %l7		! data register is at status + 1
	orcc	%g0, %l6, %g0		! bytes remaining == 0?
	stb	%l7, [%l4]
	bne	next_byte
	 add	%l4, 0x1, %l4		! advance buffer pointer

	b	floppy_tdone
	 nop

floppy_write:
	/* Ok, actually write this byte */
	ldub	[%l4], %l7
	orcc	%g0, %l6, %g0		! bytes remaining == 0?
	stb	%l7, [%l3 + 1]
	bne	next_byte
	 add	%l4, 0x1, %l4		! advance buffer pointer

	/* fall through... */
floppy_tdone:
	/* Transfer done: write back the updated buffer pointer/count. */
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Flip terminal count pin */
	set	auxio_register, %l7
	ld	[%l7], %l7

	/* The TC bit position in the aux register differs by cpu model. */
	set	sparc_cpu_model, %l5
	ld	[%l5], %l5
	subcc   %l5, 1, %g0		/* enum { sun4c = 1 }; */
	be	1f
	 ldub	[%l7], %l5

	or	%l5, 0xc2, %l5
	stb	%l5, [%l7]
	andn    %l5, 0x02, %l5
	b	2f
	 nop

1:
	or      %l5, 0xf4, %l5
	stb     %l5, [%l7]
	andn    %l5, 0x04, %l5

2:
	/* Kill some time so the bits set */
	WRITE_PAUSE
	WRITE_PAUSE

	stb     %l5, [%l7]		! drop the TC bit again

	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	b	floppy_dosoftint
	 st	%g0, [%l7 + %lo(doing_pdma)]

	/* We emptied the FIFO, but we haven't read everything
	 * as of yet.  Store the current transfer address and
	 * bytes left to read so we can continue when the next
	 * fast IRQ comes in.
	 */
floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l7
	st	%l6, [%l7 + %lo(pdma_size)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE

	jmp	%l1
	rett	%l2

floppy_overrun:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	st	%g0, [%l7 + %lo(doing_pdma)]

	/* fall through... */
floppy_dosoftint:
	/* Slow path: build a real trap frame and call the C handler. */
	rd	%wim, %l3
	SAVE_ALL

	/* Set all IRQs off. */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	mov	11, %o0			! floppy irq level (unused anyway)
	mov	%g0, %o1		! devid is not used in fast interrupts
	call	sparc_floppy_irq
	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs

	RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */
280
	/* Bad trap handler: unexpected traps are reported to C code
	 * as do_hw_interrupt(regs, trap_number).
	 */
	.globl	bad_trap_handler
bad_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	call	do_hw_interrupt
	 mov	%l7, %o1		! trap number

	RESTORE_ALL
294
/* For now all IRQ's not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and if not
 * it will say so on the console.
 */

	.align	4
	.globl	real_irq_entry, patch_handler_irq
real_irq_entry:
	SAVE_ALL

#ifdef CONFIG_SMP
	.globl	patchme_maybe_smp_msg

	/* Levels above 12 may be IPIs; check before normal dispatch.
	 * This branch is a boot-time patch site (patchme_maybe_smp_msg).
	 */
	cmp	%l7, 12
patchme_maybe_smp_msg:
	bgu	maybe_smp4m_msg
	 nop
#endif

real_irq_continue:
	/* Raise PIL first, then enable traps -- two %psr writes. */
	or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	mov	%l7, %o0		! irq level
patch_handler_irq:
	call	handler_irq
	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
	wr	%g2, PSR_ET, %psr	! keep ET up
	WRITE_PAUSE

	RESTORE_ALL
329
#ifdef CONFIG_SMP
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
	! Entered from maybe_smp4m_msg with the "level == 14" compare
	! still pending; non-ticker levels go back to normal dispatch.
	bne	real_irq_continue+4
	 or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	/* Here is where we check for possible SMP IPI passed to us
	 * on some level other than 15 which is the NMI and only used
	 * for cross calls.  That has a separate entry point below.
	 */
maybe_smp4m_msg:
	GET_PROCESSOR4M_ID(o3)
	set	sun4m_interrupts, %l5
	ld	[%l5], %o5
	sethi	%hi(0x40000000), %o2	! soft-interrupt pending bit
	sll	%o3, 12, %o3		! per-cpu register block offset
	ld	[%o5 + %o3], %o1
	andcc	%o1, %o2, %g0
	be,a	smp4m_ticker		! no message pending -> maybe ticker
	 cmp	%l7, 14
	/* Acknowledge the soft interrupt, then read back to post the write. */
	st	%o2, [%o5 + 0x4]
	WRITE_PAUSE
	ld	[%o5], %g0
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp_reschedule_irq
	 nop

	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4m
	/* Level 15 NMI on sun4m: either a cross-call IPI or an async
	 * memory error, distinguished by the pending-interrupt bit.
	 */
linux_trap_ipi15_sun4m:
	SAVE_ALL
	sethi	%hi(0x80000000), %o2
	GET_PROCESSOR4M_ID(o0)
	set	sun4m_interrupts, %l5
	ld	[%l5], %o5
	sll	%o0, 12, %o0
	add	%o5, %o0, %o5
	ld	[%o5], %o3
	andcc	%o3, %o2, %g0
	be	1f			! Must be an NMI async memory error
	 st	%o2, [%o5 + 4]
	WRITE_PAUSE
	ld	[%o5], %g0
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6
1:
	/* NMI async memory error handling. */
	sethi	%hi(0x80000000), %l4
	sethi	%hi(0x4000), %o3
	sub	%o5, %o0, %o5
	add	%o5, %o3, %l5
	st	%l4, [%l5 + 0xc]
	WRITE_PAUSE
	ld	[%l5], %g0
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	sun4m_nmi
	 nop
	st	%l4, [%l5 + 0x8]
	WRITE_PAUSE
	ld	[%l5], %g0
	WRITE_PAUSE
	RESTORE_ALL

	.globl	smp4d_ticker
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
	SAVE_ALL
	or	%l0, PSR_PIL, %g2
	sethi	%hi(CC_ICLR), %o0
	sethi	%hi(1 << 14), %o1
	or	%o0, %lo(CC_ICLR), %o0
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4d
	/* Level 15 on sun4d: check MXCC and board error status first;
	 * if clean it is a cross-call IPI, otherwise hang (FIXME below).
	 */
linux_trap_ipi15_sun4d:
	SAVE_ALL
	sethi	%hi(CC_BASE), %o4
	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
	or	%o4, (CC_EREG - CC_BASE), %o0
	ldda	[%o0] ASI_M_MXCC, %o0
	andcc	%o0, %o2, %g0
	bne	1f
	 sethi	%hi(BB_STAT2), %o2
	lduba	[%o2] ASI_M_CTL, %o2
	andcc	%o2, BB_STAT2_MASK, %g0
	bne	2f
	 or	%o4, (CC_ICLR - CC_BASE), %o0
	sethi	%hi(1 << 15), %o1
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6

1:	/* MXCC error */
2:	/* BB error */
	/* Disable PIL 15 */
	set	CC_IMSK, %l4
	lduha	[%l4] ASI_M_MXCC, %l5
	sethi	%hi(1 << 15), %l7
	or	%l5, %l7, %l5
	stha	%l5, [%l4] ASI_M_MXCC
	/* FIXME */
1:	b,a	1b			! spin forever -- no recovery yet

#endif /* CONFIG_SMP */
480
	/* This routine handles illegal instructions and privileged
	 * instruction attempts from user code.
	 */
	.align	4
	.globl	bad_instruction
bad_instruction:
	! Check whether the faulting opcode is a FLUSH (mask the opcode
	! fields and compare); if so just skip over it at 1f below.
	sethi	%hi(0xc1f80000), %l4
	ld	[%l1], %l5		! load the faulting instruction
	sethi	%hi(0x81d80000), %l7
	and	%l5, %l4, %l5
	cmp	%l5, %l7
	be	1f
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	mov	%l1, %o1		! pc
	mov	%l2, %o2		! npc
	call	do_illegal_instruction
	 mov	%l0, %o3		! psr

	RESTORE_ALL

1:	/* unimplemented flush - just skip */
	jmpl	%l2, %g0
	 rett	%l2 + 4
509
	.align	4
	.globl	priv_instruction
/* Privileged-instruction trap: hand off to do_priv_instruction(regs, pc, npc, psr). */
priv_instruction:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	mov	%l1, %o1		! pc
	mov	%l2, %o2		! npc
	call	do_priv_instruction
	 mov	%l0, %o3		! psr

	RESTORE_ALL
525
	/* This routine handles unaligned data accesses. */
	.align	4
	.globl	mna_handler
mna_handler:
	! PSR_PS tells us whether the fault came from kernel or user mode.
	andcc	%l0, PSR_PS, %g0
	be	mna_fromuser
	 nop

	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1		! the faulting instruction
	call	kernel_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs

	RESTORE_ALL

mna_fromuser:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1		! the faulting instruction
	call	user_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs

	RESTORE_ALL
556
	/* This routine handles floating point disabled traps:
	 * do_fpd_trap(regs, pc, npc, psr).
	 */
	.align	4
	.globl	fpd_trap_handler
fpd_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	mov	%l1, %o1		! pc
	mov	%l2, %o2		! npc
	call	do_fpd_trap
	 mov	%l0, %o3		! psr

	RESTORE_ALL
573
	/* This routine handles Floating Point Exceptions.
	 * If the trap hit inside fpsave itself (at fpsave_magic or at
	 * fpsave's entry) we redirect to fpsave's catch labels instead
	 * of taking the normal C path.
	 */
	.align	4
	.globl	fpe_trap_handler
fpe_trap_handler:
	set	fpsave_magic, %l5
	cmp	%l1, %l5		! trapped at fpsave_magic?
	be	1f
	 sethi	%hi(fpsave), %l5
	or	%l5, %lo(fpsave), %l5
	cmp	%l1, %l5		! trapped at fpsave entry?
	bne	2f			! no -- ordinary FPE
	 sethi	%hi(fpsave_catch2), %l5
	or	%l5, %lo(fpsave_catch2), %l5
	wr	%l0, 0x0, %psr		! restore condition codes
	WRITE_PAUSE
	jmp	%l5			! resume inside fpsave_catch2
	 rett	%l5 + 4
1:
	sethi	%hi(fpsave_catch), %l5
	or	%l5, %lo(fpsave_catch), %l5
	wr	%l0, 0x0, %psr		! restore condition codes
	WRITE_PAUSE
	jmp	%l5			! resume inside fpsave_catch
	 rett	%l5 + 4

2:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	mov	%l1, %o1		! pc
	mov	%l2, %o2		! npc
	call	do_fpe_trap
	 mov	%l0, %o3		! psr

	RESTORE_ALL
612
	/* The following handlers all share the same shape: build a trap
	 * frame, re-enable traps, and call the C handler as
	 * handler(regs, pc, npc, psr).
	 */

	/* This routine handles Tag Overflow Exceptions. */
	.align	4
	.globl	do_tag_overflow
do_tag_overflow:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_tag_overflow
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Watchpoint Exceptions. */
	.align	4
	.globl	do_watchpoint
do_watchpoint:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_watchpoint
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Register Access Exceptions. */
	.align	4
	.globl	do_reg_access
do_reg_access:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_reg_access
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Disabled Exceptions. */
	.align	4
	.globl	do_cp_disabled
do_cp_disabled:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_disabled
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Exceptions. */
	.align	4
	.globl	do_cp_exception
do_cp_exception:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_exception
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Hardware Divide By Zero Exceptions. */
	.align	4
	.globl	do_hw_divzero
do_hw_divzero:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_hw_divzero
	 mov	%l0, %o3

	RESTORE_ALL
714
	.align	4
	.globl	do_flush_windows
/* Flush-windows software trap: spill all register windows to the
 * stack, then step the saved pc/npc past the trap instruction so
 * the trap is not retaken on return.
 */
do_flush_windows:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	andcc	%l0, PSR_PS, %g0	! from kernel mode?
	bne	dfw_kernel
	 nop

	call	flush_user_windows
	 nop

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL

	.globl	flush_patch_one

	/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
	FLUSH_ALL_KERNEL_WINDOWS

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL
752
	/* The getcc software trap.  The user wants the condition codes from
	 * the %psr in register %g1.
	 */

	.align	4
	.globl	getcc_trap_handler
getcc_trap_handler:
	srl	%l0, 20, %g1	! give user
	and	%g1, 0xf, %g1	! only ICC bits in %psr
	jmp	%l2		! advance over trap instruction
	rett	%l2 + 0x4	! like this...
764
	/* The setcc software trap.  The user has condition codes in %g1
	 * that it would like placed in the %psr.  Be careful not to flip
	 * any unintentional bits!
	 */

	.align	4
	.globl	setcc_trap_handler
setcc_trap_handler:
	sll	%g1, 0x14, %l4	! move user bits up to the ICC field
	set	PSR_ICC, %l5
	andn	%l0, %l5, %l0	! clear ICC bits in %psr
	and	%l4, %l5, %l4	! clear non-ICC bits in user value
	or	%l4, %l0, %l4	! or them in... mix mix mix

	wr	%l4, 0x0, %psr	! set new %psr
	WRITE_PAUSE		! TI scumbags...

	jmp	%l2		! advance over trap instruction
	rett	%l2 + 0x4	! like this...
784
	.align	4
	.globl	linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
	SAVE_ALL

	/* Ugh, we need to clear the IRQ line.  This is now
	 * a very sun4c specific trap handler...
	 */
	sethi	%hi(interrupt_enable), %l5
	ld	[%l5 + %lo(interrupt_enable)], %l5
	ldub	[%l5], %l6
	andn	%l6, INTS_ENAB, %l6	! mask interrupts at the board
	stb	%l6, [%l5]

	/* Now it is safe to re-enable traps without recursion. */
	or	%l0, PSR_PIL, %l0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	/* Now call the c-code with the pt_regs frame ptr and the
	 * memory error registers as arguments.  The ordering chosen
	 * here is due to unlatching semantics.
	 */
	sethi	%hi(AC_SYNC_ERR), %o0
	add	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o2	! sync vaddr
	sub	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o1	! sync error
	add	%o0, 0xc, %o0
	lda	[%o0] ASI_CONTROL, %o4	! async vaddr
	sub	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o3	! async error
	call	sparc_lvl15_nmi
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL
821
	/* Boot-time patch instructions.  Each labelled single instruction
	 * below is a replacement that gets copied over the corresponding
	 * *_patch site in sun4c_fault once the machine's real segment
	 * count, context count and cache line size are known (presumably
	 * during early setup -- the patching code lives elsewhere).
	 */
	.align	4
	.globl	invalid_segment_patch1_ff
	.globl	invalid_segment_patch2_ff
invalid_segment_patch1_ff:	cmp	%l4, 0xff
invalid_segment_patch2_ff:	mov	0xff, %l3

	.align	4
	.globl	invalid_segment_patch1_1ff
	.globl	invalid_segment_patch2_1ff
invalid_segment_patch1_1ff:	cmp	%l4, 0x1ff
invalid_segment_patch2_1ff:	mov	0x1ff, %l3

	.align	4
	.globl	num_context_patch1_16, num_context_patch2_16
num_context_patch1_16:		mov	0x10, %l7
num_context_patch2_16:		mov	0x10, %l7

	.align	4
	.globl	vac_linesize_patch_32
vac_linesize_patch_32:		subcc	%l7, 32, %l7

	.align	4
	.globl	vac_hwflush_patch1_on, vac_hwflush_patch2_on

/*
 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
 * two instructions (Anton)
 */
#ifdef CONFIG_SUN4
vac_hwflush_patch1_on:		nop
#else
vac_hwflush_patch1_on:		addcc	%l7, -PAGE_SIZE, %l7
#endif

vac_hwflush_patch2_on:		sta	%g0, [%l3 + %l7] ASI_HWFLUSHSEG
857
	.globl	invalid_segment_patch1, invalid_segment_patch2
	.globl	num_context_patch1
	.globl	vac_linesize_patch, vac_hwflush_patch1
	.globl	vac_hwflush_patch2

	.align	4
	.globl	sun4c_fault

! sun4c MMU fault entry.  Kernel TLB (segment map) misses are serviced
! entirely here without building a trap frame; everything else goes to
! sun4c_fault_fromuser and the C fault handler.
! %l0 = %psr
! %l1 = %pc
! %l2 = %npc
! %l3 = %wim
! %l7 = 1 for textfault
! We want error in %l5, vaddr in %l6
sun4c_fault:
#ifdef CONFIG_SUN4
	sethi	%hi(sun4c_memerr_reg), %l4
	ld	[%l4+%lo(sun4c_memerr_reg)], %l4  ! memerr ctrl reg addr
	ld	[%l4], %l6		! memerr ctrl reg
	ld	[%l4 + 4], %l5		! memerr vaddr reg
	andcc	%l6, 0x80, %g0		! check for error type
	st	%g0, [%l4 + 4]		! clear the error
	be	0f			! normal error
	 sethi	%hi(AC_BUS_ERROR), %l4	! bus err reg addr

	call	prom_halt	! something weird happened
					! what exactly did happen?
					! what should we do here?

0:	or	%l4, %lo(AC_BUS_ERROR), %l4	! bus err reg addr
	lduba	[%l4] ASI_CONTROL, %l6	! bus err reg

	cmp    %l7, 1			! text fault?
	be	1f			! yes
	 nop

	ld     [%l1], %l4		! load instruction that caused fault
	srl	%l4, 21, %l4
	andcc	%l4, 1, %g0		! store instruction?

	be	1f			! no
	 sethi	%hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
					! %lo(SUN4C_SYNC_BADWRITE) = 0
	or	%l4, %l6, %l6		! set write bit to emulate sun4c
1:
#else
	sethi	%hi(AC_SYNC_ERR), %l4
	add	%l4, 0x4, %l6			! AC_SYNC_VA in %l6
	lda	[%l6] ASI_CONTROL, %l5		! Address
	lda	[%l4] ASI_CONTROL, %l6		! Error, retained for a bit
#endif

	andn	%l5, 0xfff, %l5			! Encode all info into l7
	srl	%l6, 14, %l4

	and	%l4, 2, %l4
	or	%l5, %l4, %l4

	or	%l4, %l7, %l7			! l7 = [addr,write,txtfault]

	andcc	%l0, PSR_PS, %g0
	be	sun4c_fault_fromuser		! user-mode faults go to C
	 andcc	%l7, 1, %g0			! Text fault?

	be	1f
	 sethi	%hi(KERNBASE), %l4

	mov	%l1, %l5			! PC

1:
	cmp	%l5, %l4
	blu	sun4c_fault_fromuser		! fault below KERNBASE -> C
	 sethi	%hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4

	/* If the kernel references a bum kernel pointer, or a pte which
	 * points to a non-existent page in ram, we will run this code
	 * _forever_ and lock up the machine!!!!! So we must check for
	 * this condition, the AC_SYNC_ERR bits are what we must examine.
	 * Also a parity error would make this happen as well.  So we just
	 * check that we are in fact servicing a tlb miss and not some
	 * other type of fault for the kernel.
	 */
	andcc	%l6, 0x80, %g0
	be	sun4c_fault_fromuser
	 and	%l5, %l4, %l5			! %l5 = segment-aligned vaddr

	/* Test for NULL pte_t * in vmalloc area. */
	sethi   %hi(VMALLOC_START), %l4
	cmp     %l5, %l4
	blu,a   invalid_segment_patch1
	 lduXa	[%l5] ASI_SEGMAP, %l4

	sethi   %hi(swapper_pg_dir), %l4
	srl     %l5, SUN4C_PGDIR_SHIFT, %l6
	or      %l4, %lo(swapper_pg_dir), %l4
	sll     %l6, 2, %l6
	ld      [%l4 + %l6], %l4		! pgd entry for the vaddr
#ifdef CONFIG_SUN4
	sethi	%hi(PAGE_MASK), %l6
	andcc	%l4, %l6, %g0
#else
	andcc   %l4, PAGE_MASK, %g0
#endif
	be      sun4c_fault_fromuser		! no page table here -> C
	 lduXa  [%l5] ASI_SEGMAP, %l4		! current pseg for the vaddr

invalid_segment_patch1:
	cmp	%l4, 0x7f			! patched: invalid-segment id
	bne	1f
	 sethi	%hi(sun4c_kfree_ring), %l4
	or	%l4, %lo(sun4c_kfree_ring), %l4
	ld	[%l4 + 0x18], %l3
	deccc	%l3			! do we have a free entry?
	bcs,a	2f			! no, unmap one.
	 sethi	%hi(sun4c_kernel_ring), %l4

	st	%l3, [%l4 + 0x18]	! sun4c_kfree_ring.num_entries--

	ld	[%l4 + 0x00], %l6	! entry = sun4c_kfree_ring.ringhd.next
	st	%l5, [%l6 + 0x08]	! entry->vaddr = address

	ld	[%l6 + 0x00], %l3	! next = entry->next
	ld	[%l6 + 0x04], %l7	! entry->prev

	st	%l7, [%l3 + 0x04]	! next->prev = entry->prev
	st	%l3, [%l7 + 0x00]	! entry->prev->next = next

	sethi	%hi(sun4c_kernel_ring), %l4
	or	%l4, %lo(sun4c_kernel_ring), %l4
					! head = &sun4c_kernel_ring.ringhd

	ld	[%l4 + 0x00], %l7	! head->next

	st	%l4, [%l6 + 0x04]	! entry->prev = head
	st	%l7, [%l6 + 0x00]	! entry->next = head->next
	st	%l6, [%l7 + 0x04]	! head->next->prev = entry

	st	%l6, [%l4 + 0x00]	! head->next = entry

	ld	[%l4 + 0x18], %l3
	inc	%l3			! sun4c_kernel_ring.num_entries++
	st	%l3, [%l4 + 0x18]
	b	4f
	 ld	[%l6 + 0x08], %l5

2:
	/* No free entries: steal the least-recently-used kernel entry. */
	or	%l4, %lo(sun4c_kernel_ring), %l4
					! head = &sun4c_kernel_ring.ringhd

	ld	[%l4 + 0x04], %l6	! entry = head->prev

	ld	[%l6 + 0x08], %l3	! tmp = entry->vaddr

	! Flush segment from the cache.
#ifdef CONFIG_SUN4
	sethi	%hi((128 * 1024)), %l7
#else
	sethi	%hi((64 * 1024)), %l7
#endif
9:
vac_hwflush_patch1:
vac_linesize_patch:
	subcc	%l7, 16, %l7		! patched: real VAC line size
	bne	9b
vac_hwflush_patch2:
	 sta	%g0, [%l3 + %l7] ASI_FLUSHSEG

	st	%l5, [%l6 + 0x08]	! entry->vaddr = address

	ld	[%l6 + 0x00], %l5	! next = entry->next
	ld	[%l6 + 0x04], %l7	! entry->prev

	st	%l7, [%l5 + 0x04]	! next->prev = entry->prev
	st	%l5, [%l7 + 0x00]	! entry->prev->next = next
	st	%l4, [%l6 + 0x04]	! entry->prev = head

	ld	[%l4 + 0x00], %l7	! head->next

	st	%l7, [%l6 + 0x00]	! entry->next = head->next
	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
	st	%l6, [%l4 + 0x00]	! head->next = entry

	mov	%l3, %l5		! address = tmp

4:
num_context_patch1:
	mov	0x08, %l7		! patched: real number of contexts

	ld	[%l6 + 0x08], %l4
	ldub	[%l6 + 0x0c], %l3
	or	%l4, %l3, %l4		! encode new vaddr/pseg into l4

	sethi	%hi(AC_CONTEXT), %l3
	lduba	[%l3] ASI_CONTROL, %l6	! remember the current context

	/* Invalidate old mapping, instantiate new mapping,
	 * for each context.  Registers l6/l7 are live across
	 * this loop.
	 */
3:	deccc	%l7
	sethi	%hi(AC_CONTEXT), %l3
	stba	%l7, [%l3] ASI_CONTROL
invalid_segment_patch2:
	mov	0x7f, %l3		! patched: invalid-segment id
	stXa	%l3, [%l5] ASI_SEGMAP
	andn	%l4, 0x1ff, %l3
	bne	3b
	 stXa	%l4, [%l3] ASI_SEGMAP

	sethi	%hi(AC_CONTEXT), %l3
	stba	%l6, [%l3] ASI_CONTROL	! restore the original context

	andn	%l4, 0x1ff, %l5

1:
	sethi	%hi(VMALLOC_START), %l4
	cmp	%l5, %l4

	bgeu	1f			! vmalloc range: walk the page table
	 mov	1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7

	/* Linear kernel mapping: compute the ptes directly from vaddr. */
	sethi	%hi(KERNBASE), %l6

	sub	%l5, %l6, %l4
	srl	%l4, PAGE_SHIFT, %l4
	sethi	%hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
	or	%l3, %l4, %l3

	sethi	%hi(PAGE_SIZE), %l4

2:
	sta	%l3, [%l5] ASI_PTE
	deccc	%l7
	inc	%l3
	bne	2b
	 add	%l5, %l4, %l5

	b	7f
	 sethi	%hi(sun4c_kernel_faults), %l4

1:
	/* vmalloc range: load the ptes out of swapper_pg_dir. */
	srl	%l5, SUN4C_PGDIR_SHIFT, %l3
	sethi	%hi(swapper_pg_dir), %l4
	or	%l4, %lo(swapper_pg_dir), %l4
	sll	%l3, 2, %l3
	ld	[%l4 + %l3], %l4
#ifndef CONFIG_SUN4
	and	%l4, PAGE_MASK, %l4
#else
	sethi	%hi(PAGE_MASK), %l6
	and	%l4, %l6, %l4
#endif

	srl	%l5, (PAGE_SHIFT - 2), %l6
	and	%l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
	add	%l6, %l4, %l6

	sethi	%hi(PAGE_SIZE), %l4

2:
	ld	[%l6], %l3
	deccc	%l7
	sta	%l3, [%l5] ASI_PTE
	add	%l6, 0x4, %l6
	bne	2b
	 add	%l5, %l4, %l5

	sethi	%hi(sun4c_kernel_faults), %l4
7:
	ld	[%l4 + %lo(sun4c_kernel_faults)], %l3
	inc	%l3			! sun4c_kernel_faults++
	st	%l3, [%l4 + %lo(sun4c_kernel_faults)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l1			! retry the faulting instruction
	 rett	%l2
1136
/* Slow path for sun4c faults: build a trap frame and call
 * do_sun4c_fault(regs, text_fault, write, address), decoding
 * the [addr,write,txtfault] word assembled in %l7 above.
 */
sun4c_fault_fromuser:
	SAVE_ALL
	 nop

	mov	%l7, %o1		! Decode the info from %l7
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2 = text_faultp
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3 = writep
	andn	%o3, 0xfff, %o3		! arg4 = faulting address

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	call	do_sun4c_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr

	RESTORE_ALL
1155
	.align	4
	.globl	srmmu_fault
/* SRMMU fault entry: read the fault address and status registers,
 * encode [addr,write,txtfault] into %l7 the same way sun4c_fault
 * does, and call do_sparc_fault(regs, text_fault, write, address).
 */
srmmu_fault:
	mov	0x400, %l5		! SFAR register offset
	mov	0x300, %l4		! SFSR register offset

	lda	[%l5] ASI_M_MMUREGS, %l6	! read sfar first
	lda	[%l4] ASI_M_MMUREGS, %l5	! read sfsr last

	andn	%l6, 0xfff, %l6
	srl	%l5, 6, %l5			! and encode all info into l7

	and	%l5, 2, %l5
	or	%l5, %l6, %l6

	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]

	SAVE_ALL

	mov	%l7, %o1
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2 = text_faultp
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3 = writep
	andn	%o3, 0xfff, %o3		! arg4 = faulting address

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	call	do_sparc_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr

	RESTORE_ALL
1189
#ifdef CONFIG_SUNOS_EMUL
	/* SunOS uses syscall zero as the 'indirect syscall' it looks
	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
	 * This is complete brain damage.
	 */
	.globl	sunos_indir
sunos_indir:
	mov	%o7, %l4		! preserve return address
	cmp	%o0, NR_SYSCALLS	! valid syscall number?
	blu,a	1f
	 sll	%o0, 0x2, %o0		! scale to table offset (delay slot)

	sethi	%hi(sunos_nosys), %l6	! out of range -> nosys
	b	2f
	 or	%l6, %lo(sunos_nosys), %l6

1:
	set	sunos_sys_table, %l7
	ld	[%l7 + %o0], %l6	! fetch the real handler

2:
	/* Shift the arguments down one slot and tail-call the handler. */
	mov	%o1, %o0
	mov	%o2, %o1
	mov	%o3, %o2
	mov	%o4, %o3
	mov	%o5, %o4
	call	%l6
	 mov	%l4, %o7		! return directly to original caller
#endif
1219
	.align	4
	.globl	sys_nis_syscall
/* Tail-call c_sys_nis_syscall(regs); %o7 juggling makes the C
 * routine return straight to our caller.
 */
sys_nis_syscall:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
	call	c_sys_nis_syscall
	 mov	%l5, %o7
1227
	.align 4
	.globl	sys_ptrace
sys_ptrace:
	call	do_ptrace
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs

	/* Notify the tracer on the way out if syscall tracing is on. */
	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 nop

1:
	RESTORE_ALL
1244
	/* Thin wrappers: each saves %o7, loads the extra argument the C
	 * routine wants (pt_regs ptr or the caller's %fp), and tail-calls
	 * so the C routine returns directly to the syscall dispatcher.
	 */
	.align	4
	.globl	sys_execve
sys_execve:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
	call	sparc_execve
	 mov	%l5, %o7

	.align	4
	.globl	sys_pipe
sys_pipe:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
	call	sparc_pipe
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigaltstack
sys_sigaltstack:
	mov	%o7, %l5
	mov	%fp, %o2			! user stack pointer
	call	do_sigaltstack
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigstack
sys_sigstack:
	mov	%o7, %l5
	mov	%fp, %o2			! user stack pointer
	call	do_sys_sigstack
	 mov	%l5, %o7
1276
	.align	4
	.globl	sys_sigreturn
sys_sigreturn:
	call	do_sigreturn
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs

	/* Notify the tracer on the way out if syscall tracing is on. */
	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 nop

1:
	/* We don't want to muck with user registers like a
	 * normal syscall, just return.
	 */
	RESTORE_ALL
1296
	.align	4
	.globl	sys_rt_sigreturn
sys_rt_sigreturn:
	call	do_rt_sigreturn
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs

	/* Notify the tracer on the way out if syscall tracing is on. */
	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 nop

1:
	/* We are returning to a signal handler. */
	RESTORE_ALL
1314
	/* Now that we have a real sys_clone, sys_fork() is
	 * implemented in terms of it.  Our _real_ implementation
	 * of SunOS vfork() will use sys_vfork().
	 *
	 * XXX These three should be consolidated into mostly shared
	 * XXX code just like on sparc64... -DaveM
	 */
	.align	4
	.globl	sys_fork, flush_patch_two
sys_fork:
	mov	%o7, %l5
flush_patch_two:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	mov	SIGCHLD, %o0			! arg0:	clone flags
	rd	%wim, %g5
	WRITE_PAUSE
	mov	%fp, %o1			! arg1:	usp
	! Stash the fork-time psr/wim pair in the task's thread struct.
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	 mov	%l5, %o7
1340
	/* Whee, kernel threads! */
	.globl	sys_clone, flush_patch_three
sys_clone:
	/* Like sys_fork above, but flags/usp arrive from the user in
	 * %o0/%o1.  A NULL usp means "reuse the caller's stack" (the
	 * annulled delay slot substitutes %fp); otherwise the supplied
	 * stack pointer is aligned down to 8 bytes.
	 */
	mov	%o7, %l5
flush_patch_three:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4	! %o4 = current task_struct
	rd	%psr, %g4
	WRITE_PAUSE

	/* arg0,1: flags,usp  -- loaded already */
	cmp	%o1, 0x0			! Is new_usp NULL?
	rd	%wim, %g5
	WRITE_PAUSE
	be,a	1f
	 mov	%fp, %o1			! yes, use callers usp
	andn	%o1, 7, %o1			! no, align to 8 bytes
1:
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	 mov	%l5, %o7

	/* Whee, real vfork! */
	.globl	sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
	/* vfork = clone(CLONE_VFORK | CLONE_VM | SIGCHLD, %fp, regs, 0).
	 * Note this uses jmpl (a jump, not a call): %o7 still holds our
	 * caller's return address, so sparc_do_fork returns directly to
	 * it -- no %l5 save needed here.
	 */
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	rd	%wim, %g5
	WRITE_PAUSE
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0	! CLONE_VFORK | CLONE_VM | SIGCHLD
	mov	%fp, %o1
	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
	sethi	%hi(sparc_do_fork), %l1
	mov	0, %o3
	jmpl	%l1 + %lo(sparc_do_fork), %g0
	 add	%sp, STACKFRAME_SZ, %o2

        .align  4
linux_sparc_ni_syscall:
	/* Out-of-range syscall number: point %l7 at sys_ni_syscall and
	 * run it through the normal slow path so the usual -ENOSYS
	 * return convention applies.
	 */
	sethi   %hi(sys_ni_syscall), %l7
	b       syscall_is_too_hard
	 or     %l7, %lo(sys_ni_syscall), %l7

linux_fast_syscall:
	/* Fast-path dispatch: no pt_regs frame is built.  Clear the low
	 * flag bits of the handler address (bit 0 marked this table
	 * entry as "fast"), copy the first four args, and jump.
	 */
	andn	%l7, 3, %l7
	mov	%i0, %o0
	mov	%i1, %o1
	mov 	%i2, %o2
	jmpl	%l7 + %g0, %g0
	 mov	%i3, %o3

linux_syscall_trace:
	/* Syscall-entry tracing: notify the tracer, then reload the
	 * argument registers (the tracer may have modified the saved
	 * copies' originals in %i0-%i4 context) and rejoin the dispatch
	 * path at label 2 in syscall_is_too_hard.
	 */
	call	syscall_trace
	 nop
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	b	2f
	 mov	%i4, %o4

	.globl	ret_from_fork
ret_from_fork:
	/* First code run by a new child: finish the context switch with
	 * schedule_tail(%g3) -- NOTE(review): %g3 presumably carries the
	 * previous task from the switch code; confirm -- then exit
	 * through ret_sys_call with the child's %o0 from its pt_regs.
	 */
	call	schedule_tail
	 mov	%g3, %o0
	b	ret_sys_call
	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0

	/* Linux native and SunOS system calls enter here... */
	.align	4
	.globl	linux_sparc_syscall
linux_sparc_syscall:
	/* Direct access to user regs, much faster.  On entry %g1 holds
	 * the syscall number; NOTE(review): %l7 appears to hold the
	 * syscall table base, preloaded by the trap entry -- confirm.
	 * The sll in the bgeu delay slot runs either way; its result is
	 * simply unused on the out-of-range path.
	 */
	cmp	%g1, NR_SYSCALLS
	bgeu	linux_sparc_ni_syscall
	 sll	%g1, 2, %l4			! word offset into the table
	ld	[%l7 + %l4], %l7		! %l7 = handler; bit 0 = fast flag
	andcc	%l7, 1, %g0
	bne	linux_fast_syscall
	 /* Just do first insn from SAVE_ALL in the delay slot */

	.globl	syscall_is_too_hard
syscall_is_too_hard:
	/* Slow path: build a full pt_regs frame, re-enable traps, copy
	 * the six user arguments %i0-%i5 into %o0-%o5, honour syscall
	 * tracing, then call the handler in %l7 and park the result in
	 * the saved user %o0 slot for ret_sys_call.
	 */
	SAVE_ALL_HEAD
	 rd	%wim, %l3

	wr	%l0, PSR_ET, %psr		! traps back on
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2

	ld	[%curptr + TI_FLAGS], %l5
	mov	%i3, %o3
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	mov	%i4, %o4
	bne	linux_syscall_trace
	 mov	%i0, %l5			! stash original arg0
2:
	call	%l7
	 mov	%i5, %o5

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]	! result -> saved user %o0

	.globl	ret_sys_call
ret_sys_call:
	/* Common syscall exit.  The error protocol is carried in the
	 * saved PSR carry bit: clear C and return the value on success;
	 * set C and return abs(errno) on failure.  Return values that
	 * compare unsigned >= -ERESTART_RESTARTBLOCK are errors.
	 * %l6 is set to 0 (ok) / 1 (error); NOTE(review): presumably
	 * consumed further down the trace/return path -- confirm.
	 */
	ld	[%curptr + TI_FLAGS], %l6
	cmp	%o0, -ERESTART_RESTARTBLOCK
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
	set	PSR_C, %g2
	bgeu	1f
	 andcc	%l6, _TIF_SYSCALL_TRACE, %g0

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	/* The bne tests the andcc above: andn/clr/st do not modify the
	 * condition codes, so this is the _TIF_SYSCALL_TRACE check.
	 */
	bne	linux_syscall_trace2
	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
	add	%l1, 0x4, %l2			/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
	or	%g3, %g2, %g3
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	mov	1, %l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	/* Condition codes from the andcc above are still live here. */
	bne	linux_syscall_trace2
	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
	add	%l1, 0x4, %l2			/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

linux_syscall_trace2:
	/* Syscall-exit tracing, then advance the pc/npc pair exactly as
	 * the non-traced exit paths do (%l1 = old npc, loaded by our
	 * caller in the branch delay slot).
	 */
	call	syscall_trace
	 add	%l1, 0x4, %l2			/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]


1492
1493	/*
1494	 * Solaris system calls and indirect system calls enter here.
1495         *
1496	 * I have named the solaris indirect syscalls like that because
1497	 * it seems like Solaris has some fast path syscalls that can
1498	 * be handled as indirect system calls. - mig
1499	 */
1500
1501linux_syscall_for_solaris:
1502	sethi	%hi(sys_call_table), %l7
1503	b	linux_sparc_syscall
1504	 or	%l7, %lo(sys_call_table), %l7
1505
	.align	4
	.globl	solaris_syscall
solaris_syscall:
	/* A few Solaris syscall numbers (59, 2, 42, and 119 -- the last
	 * remapped to 2) are serviced directly by the Linux table.  The
	 * chain below exploits delay slots: each plain 'be' executes the
	 * next cmp in its delay slot regardless, while the final 'be,a'
	 * annuls its slot so 'mov 2, %g1' only runs when %g1 == 119.
	 * NOTE(review): the semantics of these numbers live in the
	 * Solaris emulation layer -- confirm there.
	 */
	cmp	%g1,59
	be	linux_syscall_for_solaris
	 cmp	%g1,2
	be	linux_syscall_for_solaris
	 cmp    %g1,42
	be      linux_syscall_for_solaris
	 cmp	%g1,119
	be,a	linux_syscall_for_solaris
	 mov	2, %g1
1:
	/* Everything else: full frame, traps on, and off to the Solaris
	 * emulation dispatcher with the pt_regs pointer.
	 */
	SAVE_ALL_HEAD
	 rd	%wim, %l3

	wr	%l0, PSR_ET, %psr
	nop
	nop
	mov	%i0, %l5			! stash original arg0

	call	do_solaris_syscall
	 add	%sp, STACKFRAME_SZ, %o0

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	set	PSR_C, %g2
	cmp	%o0, -ERESTART_RESTARTBLOCK
	bgeu	1f
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	b	2f
	 st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
	mov	1, %l6
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	or	%g3, %g2, %g3
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

	/* Advance the pc and npc over the trap instruction.
	 * If the npc is unaligned (has a 1 in the lower byte), it means
	 * the kernel does not want us to play magic (ie, skipping over
	 * traps).  Mainly when the Solaris code wants to set some PC and
	 * nPC (setcontext).
	 */
2:
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc  = npc   */
	andcc	%l1, 1, %g0
	bne	1f
	 add	%l1, 0x4, %l2			/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	/* kernel knows what it is doing, fixup npc and continue */
1:
	sub	%l1, 1, %l1			! strip the marker bit
 	b	ret_trap_entry
	 st	%l1, [%sp + STACKFRAME_SZ + PT_NPC]

#ifndef CONFIG_SUNOS_EMUL
	.align	4
	.globl	sunos_syscall
sunos_syscall:
	/* Stub used when SunOS emulation is compiled out: build a full
	 * trap frame, enable traps, and hand off to do_sunos_syscall.
	 * NOTE(review): no instructions follow the call here, so
	 * do_sunos_syscall presumably completes the trap return itself
	 * -- confirm against its C implementation.
	 */
	SAVE_ALL_HEAD
	 rd	%wim, %l3
	wr	%l0, PSR_ET, %psr
	nop
	nop
	mov	%i0, %l5			! stash original arg0
	call	do_sunos_syscall
	 add	%sp, STACKFRAME_SZ, %o0
#endif
1586
	/* {net, open}bsd system calls enter here... */
	.align	4
	.globl	bsd_syscall
bsd_syscall:
	/* Direct access to user regs, much faster.  %g1 = syscall
	 * number; NOTE(review): %l7 appears to be the BSD syscall table
	 * base, preloaded by the trap entry -- confirm.  Out-of-range
	 * numbers fall through to sys_ni_syscall.
	 */
	cmp	%g1, NR_SYSCALLS
	blu,a	1f
	 sll	%g1, 2, %l4

	set	sys_ni_syscall, %l7
	b	bsd_is_too_hard
	 nop

1:
	ld	[%l7 + %l4], %l7

	.globl	bsd_is_too_hard
bsd_is_too_hard:
	/* Full-frame path: enable traps, copy the six args, call the
	 * handler in %l7, then apply the carry-bit error convention
	 * (same scheme as ret_sys_call above).
	 */
	rd	%wim, %l3
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

2:
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i0, %l5			! stash original arg0
	mov	%i3, %o3
	mov	%i4, %o4
	call	%l7
	 mov	%i5, %o5

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	set	PSR_C, %g2
	cmp	%o0, -ERESTART_RESTARTBLOCK
	bgeu	1f
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	b	2f
	 st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
#if 0 /* XXX todo XXX */
	sethi	%hi(bsd_xlatb_rorl), %o3
	or	%o3, %lo(bsd_xlatb_rorl), %o3
	sll	%o0, 2, %o0
	ld	[%o3 + %o0], %o0
#endif
	mov	1, %l6
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	or	%g3, %g2, %g3
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]

	/* Advance the pc and npc over the trap instruction. */
2:
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc  = npc   */
	add	%l1, 0x4, %l2			/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

/* Saving and restoring the FPU state is best done from lowlevel code.
 *
 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
 *             void *fpqueue, unsigned long *fpqdepth)
 */

	.globl	fpsave
fpsave:
	/* Storing %fsr can itself trap if the FPU holds a deferred
	 * exception; fpsave_catch2 below re-enters at fpsave + 4 after
	 * the trap handler has dealt with it.
	 */
	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
	ld	[%o1], %g1
	set	0x2000, %g4			! FSR qne (queue-not-empty) bit
	andcc	%g1, %g4, %g0
	be	2f
	 mov	0, %g2				! queue depth 0 if already empty

	/* We have an fpqueue to save. */
1:
	std	%fq, [%o2]			! dequeue one <addr,insn> pair
fpsave_magic:
	st	%fsr, [%o1]			! may trap; see fpsave_catch
	ld	[%o1], %g3
	andcc	%g3, %g4, %g0			! more entries pending?
	add	%g2, 1, %g2
	bne	1b
	 add	%o2, 8, %o2

2:
	st	%g2, [%o3]			! *fpqdepth = entries drained

	/* Dump the register file %f0-%f30 into *fpregs. */
	std	%f0, [%o0 + 0x00]
	std	%f2, [%o0 + 0x08]
	std	%f4, [%o0 + 0x10]
	std	%f6, [%o0 + 0x18]
	std	%f8, [%o0 + 0x20]
	std	%f10, [%o0 + 0x28]
	std	%f12, [%o0 + 0x30]
	std	%f14, [%o0 + 0x38]
	std	%f16, [%o0 + 0x40]
	std	%f18, [%o0 + 0x48]
	std	%f20, [%o0 + 0x50]
	std	%f22, [%o0 + 0x58]
	std	%f24, [%o0 + 0x60]
	std	%f26, [%o0 + 0x68]
	std	%f28, [%o0 + 0x70]
	retl
	 std	%f30, [%o0 + 0x78]

	/* Thanks to Theo de Raadt and the authors of the Sprite/netbsd/openbsd
	 * code for pointing out this possible deadlock, while we save state
	 * above we could trap on the fsr store so our low level fpu trap
	 * code has to know how to deal with this.
	 */

	/* Re-entry stubs for the FPU trap handler: redo the %fsr store
	 * in the delay slot, then resume fpsave just past whichever
	 * store trapped.
	 */
fpsave_catch:
	b	fpsave_magic + 4
	 st	%fsr, [%o1]

fpsave_catch2:
	b	fpsave + 4
	 st	%fsr, [%o1]

	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */

	.globl	fpload
fpload:
	/* Reload %f0-%f30 from *fpregs, then restore the %fsr last. */
	ldd	[%o0 + 0x00], %f0
	ldd	[%o0 + 0x08], %f2
	ldd	[%o0 + 0x10], %f4
	ldd	[%o0 + 0x18], %f6
	ldd	[%o0 + 0x20], %f8
	ldd	[%o0 + 0x28], %f10
	ldd	[%o0 + 0x30], %f12
	ldd	[%o0 + 0x38], %f14
	ldd	[%o0 + 0x40], %f16
	ldd	[%o0 + 0x48], %f18
	ldd	[%o0 + 0x50], %f20
	ldd	[%o0 + 0x58], %f22
	ldd	[%o0 + 0x60], %f24
	ldd	[%o0 + 0x68], %f26
	ldd	[%o0 + 0x70], %f28
	ldd	[%o0 + 0x78], %f30
	ld	[%o1], %fsr
	retl
	 nop

	/* __ndelay and __udelay take two arguments:
	 * 0 - nsecs or usecs to delay
	 * 1 - per_cpu udelay_val (loops per jiffy)
	 *
	 * Note that ndelay gives HZ times higher resolution but has a 10ms
	 * limit.  udelay can handle up to 1s.
	 */
	.globl	__ndelay
__ndelay:
	/* Scale nsecs into a spin count via two .umul fixed-point
	 * multiplies, keeping the product's high word (in %o1 after
	 * .umul) as the loop count for delay_continue.
	 */
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	call	.umul			! round multiplier up so large ns ok
	 mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
	call	.umul
	 mov	%i1, %o1		! udelay_val
	ba	delay_continue
	 mov	%o1, %o0		! >>32 later for better resolution

	.globl	__udelay
__udelay:
	/* Like __ndelay, but with a usec multiplier, a rounding constant
	 * folded in (with carry propagated into the high word), and a
	 * final multiply by HZ before falling through to delay_continue.
	 */
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	sethi	%hi(0x10c7), %o1	! round multiplier up so large us ok
	call	.umul
	 or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
	call	.umul
	 mov	%i1, %o1		! udelay_val
	sethi	%hi(0x028f4b62), %l0	! Add in rounding constant * 2**32,
	or	%g0, %lo(0x028f4b62), %l0
	addcc	%o0, %l0, %o0		! 2**32 * 0.009 999
	bcs,a	3f
	 add	%o1, 0x01, %o1		! carry into the high word
3:
	call	.umul
	 mov	HZ, %o0			! >>32 earlier for wider range

delay_continue:
	/* Busy-wait %o0 iterations.  The subcc in the delay slot both
	 * decrements the counter and sets the condition codes tested on
	 * the next trip through the bne; the initial cmp seeds them.
	 */
	cmp	%o0, 0x0
1:
	bne	1b
	 subcc	%o0, 1, %o0

	ret
	restore

	/* Handle a software breakpoint */
	/* We have to inform parent that child has stopped */
	.align 4
	.globl breakpoint_trap
breakpoint_trap:
	/* Build a full trap frame, re-enable traps, and let the C
	 * handler sparc_breakpoint(regs) do the ptrace notification.
	 */
	rd	%wim,%l3
	SAVE_ALL
	wr 	%l0, PSR_ET, %psr
	WRITE_PAUSE

	st	%i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
	call	sparc_breakpoint
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

	.align	4
	.globl	__handle_exception, flush_patch_exception
__handle_exception:
flush_patch_exception:
	/* Exception-fixup unwinder: flush all kernel register windows,
	 * reload the saved %sp/%o7 pair from *%o0 (ldd writes the
	 * %o6/%o7 couple), then resume at the recorded fixup address
	 * with %g1 = 1 to flag the fault to the caller.
	 */
	FLUSH_ALL_KERNEL_WINDOWS;
	ldd	[%o0], %o6
	jmpl	%o7 + 0xc, %g0			! see asm-sparc/processor.h
	 mov	1, %g1				! signal EFAULT condition

	.align	4
	.globl	kill_user_windows, kuw_patch1_7win
	.globl	kuw_patch1

	/* Replacement instruction for kuw_patch1 below -- NOTE(review):
	 * presumably patched in on CPUs with 7 register windows (shift
	 * by 6 instead of 7); confirm against the boot patch code.
	 */
kuw_patch1_7win:	sll	%o3, 6, %o3

	/* No matter how much overhead this routine has in the worst
	 * case scenario, it is several times better than taking the
	 * traps with the old method of just doing flush_user_windows().
	 */
kill_user_windows:
	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
	orcc	%g0, %o0, %g0			! if no bits set, we are done
	be	3f				! nothing to do
	 rd	%psr, %o5			! must clear interrupts
	or	%o5, PSR_PIL, %o4		! or else that could change
	wr	%o4, 0x0, %psr			! the uwinmask state
	WRITE_PAUSE				! burn them cycles
1:
	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
	orcc	%g0, %o0, %g0			! did an interrupt come in?
	be	4f				! yep, we are done
	 rd	%wim, %o3			! get current wim
	srl	%o3, 1, %o4			! simulate a save
kuw_patch1:
	sll	%o3, 7, %o3			! compute next wim
	or	%o4, %o3, %o3			! result
	andncc	%o0, %o3, %o0			! clean this bit in umask
	bne	kuw_patch1			! not done yet
	 srl	%o3, 1, %o4			! begin another save simulation
	wr	%o3, 0x0, %wim			! set the new wim
	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
4:
	wr	%o5, 0x0, %psr			! re-enable interrupts
	WRITE_PAUSE				! burn baby burn
3:
	retl					! return
	 st	%g0, [%g6 + TI_W_SAVED]		! no windows saved

	.align	4
	.globl	restore_current
restore_current:
	/* Reload %g6 with the current thread_info pointer (the curptr
	 * register used throughout this file); clobbers %o0 as scratch.
	 */
	LOAD_CURRENT(g6, o0)
	retl
	 nop

1856#ifdef CONFIG_PCI
1857#include <asm/pcic.h>
1858
	.align	4
	.globl	linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
	/* Level-15 (NMI) trap handler for PCIC-based machines. */
	rd	%wim, %l3
	SAVE_ALL

	/*
	 * First deactivate NMI
	 * or we cannot drop ET, cannot get window spill traps.
	 * The busy loop is necessary because the PIO error
	 * sometimes does not go away quickly and we trap again.
	 */
	sethi	%hi(pcic_regs), %o1
	ld	[%o1 + %lo(pcic_regs)], %o2	! %o2 = PCIC register base

	! Get pending status for printouts later.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o0

	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
	! Spin until the PIO/PCI pending bits really drop.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
	bne	1b
	 nop

	/* Raise PIL to mask interrupts, then turn traps back on so
	 * window spills can be serviced while in the C handler.
	 */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	! pcic_nmi(pending /* %o0, loaded above */, regs)
	call	pcic_nmi
	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
	RESTORE_ALL

1894
	.globl	pcic_nmi_trap_patch
pcic_nmi_trap_patch:
	/* Four-word template -- NOTE(review): presumably copied over the
	 * level-15 trap table entry by the PCIC setup code; confirm.
	 * 'rd %psr, %l0' in the delay slot provides the %l0 the handler
	 * expects, and the trailing .word 0 pads the 4-insn entry.
	 */
	sethi	%hi(linux_trap_ipi15_pcic), %l3
	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
	 rd	%psr, %l0
	.word	0

1901
1902#endif /* CONFIG_PCI */
1903
1904/* End of entry.S */
1905