xref: /openbmc/linux/arch/sparc/kernel/entry.S (revision 64fc2a94)
1/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
2 *
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
8 */
9
10#include <linux/linkage.h>
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/contregs.h>
17#include <asm/ptrace.h>
18#include <asm/asm-offsets.h>
19#include <asm/psr.h>
20#include <asm/vaddrs.h>
21#include <asm/page.h>
22#include <asm/pgtable.h>
23#include <asm/winmacro.h>
24#include <asm/signal.h>
25#include <asm/obio.h>
26#include <asm/mxcc.h>
27#include <asm/thread_info.h>
28#include <asm/param.h>
29#include <asm/unistd.h>
30
31#include <asm/asmmacro.h>
32#include <asm/export.h>
33
/* %g6 ("curptr") holds the current thread_info pointer while in the
 * kernel; see LOAD_CURRENT()/restore_current at the bottom of this file.
 */
34#define curptr      g6
35
36/* These are just handy. */
37#define _SV	save	%sp, -STACKFRAME_SZ, %sp
38#define _RS     restore
39
/* Seven nested save/restore pairs walk the register-window ring,
 * forcing window-overflow traps that spill every dirty window to the
 * stack.  Used at the runtime patch points (flush_patch_*) below.
 */
40#define FLUSH_ALL_KERNEL_WINDOWS \
41	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
42	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
43
44	.text
45
46#ifdef CONFIG_KGDB
47	.align	4
48	.globl		arch_kgdb_breakpoint
49	.type		arch_kgdb_breakpoint,#function
/* Programmatic debugger breakpoint: trap 0x7d enters KGDB (presumably
 * vectored to kgdb_trap_low below via the trap table -- confirm against
 * the trap table in head.S), then return to the caller.  The nop fills
 * retl's delay slot.
 */
50arch_kgdb_breakpoint:
51	ta		0x7d
52	retl
53	 nop
54	.size		arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
55#endif
56
57#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
58	.align	4
59	.globl	floppy_hardint
/* Fast (pseudo-DMA) floppy interrupt entry.  Runs without building a
 * pt_regs frame when a PDMA transfer is in progress; only falls back to
 * the full SAVE_ALL path (floppy_dosoftint) when there is no transfer,
 * when it completes, or on an overrun.
 */
60floppy_hardint:
61	/*
62	 * This code cannot touch registers %l0 %l1 and %l2
63	 * because SAVE_ALL depends on their values. It depends
64	 * on %l3 also, but we regenerate it before a call.
65	 * Other registers are:
66	 * %l3 -- base address of fdc registers
67	 * %l4 -- pdma_vaddr
68	 * %l5 -- scratch for ld/st address
69	 * %l6 -- pdma_size
70	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
71	 */
72
73	/* Do we have work to do? */
74	sethi	%hi(doing_pdma), %l7
75	ld	[%l7 + %lo(doing_pdma)], %l7
76	cmp	%l7, 0
77	be	floppy_dosoftint
78	 nop
79
80	/* Load fdc register base */
81	sethi	%hi(fdc_status), %l3
82	ld	[%l3 + %lo(fdc_status)], %l3
83
84	/* Setup register addresses */
85	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
86	ld	[%l5 + %lo(pdma_vaddr)], %l4
87	sethi	%hi(pdma_size), %l5	! bytes to go
88	ld	[%l5 + %lo(pdma_size)], %l6
/* Per-byte transfer loop: poll the FDC main status register at [%l3]
 * and move one byte per iteration between the data port ([%l3 + 1])
 * and the buffer at %l4, decrementing the byte count in %l6.
 */
89next_byte:
90  	ldub	[%l3], %l7
91
92	andcc	%l7, 0x80, %g0		! Does fifo still have data
93	bz	floppy_fifo_emptied	! fifo has been emptied...
94	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
95	bz	floppy_overrun		! nope, overrun
96	 andcc	%l7, 0x40, %g0		! 0=write 1=read
97	bz	floppy_write
98	 sub	%l6, 0x1, %l6
99
100	/* Ok, actually read this byte */
101	ldub	[%l3 + 1], %l7
102	orcc	%g0, %l6, %g0
103	stb	%l7, [%l4]
104	bne	next_byte
105	 add	%l4, 0x1, %l4
106
107	b	floppy_tdone
108	 nop
109
110floppy_write:
111	/* Ok, actually write this byte */
112	ldub	[%l4], %l7
113	orcc	%g0, %l6, %g0
114	stb	%l7, [%l3 + 1]
115	bne	next_byte
116	 add	%l4, 0x1, %l4
117
118	/* fall through... */
/* Transfer done: publish final buffer pointer / residual count, then
 * pulse the terminal-count pin through the auxio register.
 */
119floppy_tdone:
120	sethi	%hi(pdma_vaddr), %l5
121	st	%l4, [%l5 + %lo(pdma_vaddr)]
122	sethi	%hi(pdma_size), %l5
123	st	%l6, [%l5 + %lo(pdma_size)]
124	/* Flip terminal count pin */
125	set	auxio_register, %l7
126	ld	[%l7], %l7
127
128	ldub	[%l7], %l5
129
130	or	%l5, 0xc2, %l5
131	stb	%l5, [%l7]
132	andn    %l5, 0x02, %l5
133
1342:
135	/* Kill some time so the bits set */
136	WRITE_PAUSE
137	WRITE_PAUSE
138
	/* Write back with the 0x02 (TC) bit cleared again. */
139	stb     %l5, [%l7]
140
141	/* Prevent recursion */
142	sethi	%hi(doing_pdma), %l7
143	b	floppy_dosoftint
144	 st	%g0, [%l7 + %lo(doing_pdma)]
145
146	/* We emptied the FIFO, but we haven't read everything
147	 * as of yet.  Store the current transfer address and
148	 * bytes left to read so we can continue when the next
149	 * fast IRQ comes in.
150	 */
151floppy_fifo_emptied:
152	sethi	%hi(pdma_vaddr), %l5
153	st	%l4, [%l5 + %lo(pdma_vaddr)]
154	sethi	%hi(pdma_size), %l7
155	st	%l6, [%l7 + %lo(pdma_size)]
156
157	/* Restore condition codes */
158	wr	%l0, 0x0, %psr
159	WRITE_PAUSE
160
	/* Fast return from trap: %l1/%l2 still hold the trap-time
	 * pc/npc (see the register note at the top of this routine).
	 */
161	jmp	%l1
162	rett	%l2
163
164floppy_overrun:
165	sethi	%hi(pdma_vaddr), %l5
166	st	%l4, [%l5 + %lo(pdma_vaddr)]
167	sethi	%hi(pdma_size), %l5
168	st	%l6, [%l5 + %lo(pdma_size)]
169	/* Prevent recursion */
170	sethi	%hi(doing_pdma), %l7
171	st	%g0, [%l7 + %lo(doing_pdma)]
172
173	/* fall through... */
/* Slow path: build a full pt_regs frame, mask interrupts at PIL,
 * re-enable traps, and hand off to the C handler.
 */
174floppy_dosoftint:
175	rd	%wim, %l3
176	SAVE_ALL
177
178	/* Set all IRQs off. */
179	or	%l0, PSR_PIL, %l4
180	wr	%l4, 0x0, %psr
181	WRITE_PAUSE
182	wr	%l4, PSR_ET, %psr
183	WRITE_PAUSE
184
185	mov	11, %o0			! floppy irq level (unused anyway)
186	mov	%g0, %o1		! devid is not used in fast interrupts
187	call	sparc_floppy_irq
188	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs
189
190	RESTORE_ALL
191
192#endif /* (CONFIG_BLK_DEV_FD) */
193
194	/* Bad trap handler */
195	.globl	bad_trap_handler
/* Catch-all for traps with no dedicated handler: save state, re-enable
 * traps, and report via do_hw_interrupt(regs, trap#).  %l7 is assumed
 * to carry the trap number from the trap-table stub -- confirm against
 * the trap table setup in head.S.
 */
196bad_trap_handler:
197	SAVE_ALL
198
199	wr	%l0, PSR_ET, %psr
200	WRITE_PAUSE
201
202	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
203	call	do_hw_interrupt
204	 mov	%l7, %o1		! trap number
205
206	RESTORE_ALL
207
208/* For now all IRQ's not registered get sent here. handler_irq() will
209 * see if a routine is registered to handle this interrupt and if not
210 * it will say so on the console.
211 */
212
213	.align	4
214	.globl	real_irq_entry, patch_handler_irq
/* Generic hardware interrupt entry.  %l7 carries the irq level.
 * patchme_maybe_smp_msg and patch_handler_irq are labels that get
 * rewritten at boot time for the detected platform.
 */
215real_irq_entry:
216	SAVE_ALL
217
218#ifdef CONFIG_SMP
219	.globl	patchme_maybe_smp_msg
220
	/* Levels above 11 may be SMP IPIs / per-cpu ticker; check. */
221	cmp	%l7, 11
222patchme_maybe_smp_msg:
223	bgu	maybe_smp4m_msg
224	 nop
225#endif
226
227real_irq_continue:
	/* Mask interrupts at PIL, then re-enable traps before calling C. */
228	or	%l0, PSR_PIL, %g2
229	wr	%g2, 0x0, %psr
230	WRITE_PAUSE
231	wr	%g2, PSR_ET, %psr
232	WRITE_PAUSE
233	mov	%l7, %o0		! irq level
234patch_handler_irq:
235	call	handler_irq
236	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
237	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
238	wr	%g2, PSR_ET, %psr	! keep ET up
239	WRITE_PAUSE
240
241	RESTORE_ALL
242
243#ifdef CONFIG_SMP
244	/* SMP per-cpu ticker interrupts are handled specially. */
/* Entered from maybe_smp4m_msg with the "cmp %l7, 14" result still in
 * the condition codes: not-equal means an ordinary IRQ level, so resume
 * the generic path (skipping its first instruction, which the delay
 * slot below duplicates).
 */
245smp4m_ticker:
246	bne	real_irq_continue+4
247	 or	%l0, PSR_PIL, %g2
248	wr	%g2, 0x0, %psr
249	WRITE_PAUSE
250	wr	%g2, PSR_ET, %psr
251	WRITE_PAUSE
252	call	smp4m_percpu_timer_interrupt
253	 add	%sp, STACKFRAME_SZ, %o0
254	wr	%l0, PSR_ET, %psr
255	WRITE_PAUSE
256	RESTORE_ALL
257
/* Extract this cpu's id from %tbr bits [13:12] (sun4m, up to 4 cpus). */
258#define GET_PROCESSOR4M_ID(reg)	\
259	rd	%tbr, %reg;	\
260	srl	%reg, 12, %reg;	\
261	and	%reg, 3, %reg;
262
263	/* Here is where we check for possible SMP IPI passed to us
264	 * on some level other than 15 which is the NMI and only used
265	 * for cross calls.  That has a separate entry point below.
266	 *
267	 * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
268	 */
269maybe_smp4m_msg:
270	GET_PROCESSOR4M_ID(o3)
271	sethi	%hi(sun4m_irq_percpu), %l5
272	sll	%o3, 2, %o3
273	or	%l5, %lo(sun4m_irq_percpu), %o5
274	sethi	%hi(0x70000000), %o2	! Check all soft-IRQs
275	ld	[%o5 + %o3], %o1
276	ld	[%o1 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
277	andcc	%o3, %o2, %g0
278	be,a	smp4m_ticker
279	 cmp	%l7, 14
280	/* Soft-IRQ IPI */
281	st	%o2, [%o1 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x70000000
282	WRITE_PAUSE
283	ld	[%o1 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
284	WRITE_PAUSE
285	or	%l0, PSR_PIL, %l4
286	wr	%l4, 0x0, %psr
287	WRITE_PAUSE
288	wr	%l4, PSR_ET, %psr
289	WRITE_PAUSE
	/* %o2 now holds the three pending-IPI bits:
	 *   0x1 = call-function-single, 0x2 = call-function, 0x4 = resched
	 */
290	srl	%o3, 28, %o2		! shift for simpler checks below
291maybe_smp4m_msg_check_single:
292	andcc	%o2, 0x1, %g0
293	beq,a	maybe_smp4m_msg_check_mask
294	 andcc	%o2, 0x2, %g0
295	call	smp_call_function_single_interrupt
296	 nop
297	andcc	%o2, 0x2, %g0
298maybe_smp4m_msg_check_mask:
299	beq,a	maybe_smp4m_msg_check_resched
300	 andcc	%o2, 0x4, %g0
301	call	smp_call_function_interrupt
302	 nop
303	andcc	%o2, 0x4, %g0
304maybe_smp4m_msg_check_resched:
305	/* rescheduling is done in RESTORE_ALL regardless, but incr stats */
306	beq,a	maybe_smp4m_msg_out
307	 nop
308	call	smp_resched_interrupt
309	 nop
310maybe_smp4m_msg_out:
311	RESTORE_ALL
312
313	.align	4
314	.globl	linux_trap_ipi15_sun4m
/* Level-15 (NMI) entry on sun4m/SMP.  If this cpu's 0x80000000 pending
 * bit is set it is a cross-call IPI: ack it and run smp4m_cross_call_irq;
 * otherwise it is an async memory error (sun4m_nmi_error).
 */
315linux_trap_ipi15_sun4m:
316	SAVE_ALL
317	sethi	%hi(0x80000000), %o2
318	GET_PROCESSOR4M_ID(o0)
319	sethi	%hi(sun4m_irq_percpu), %l5
320	or	%l5, %lo(sun4m_irq_percpu), %o5
321	sll	%o0, 2, %o0
322	ld	[%o5 + %o0], %o5
323	ld	[%o5 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
324	andcc	%o3, %o2, %g0
325	be	sun4m_nmi_error		! Must be an NMI async memory error
326	 st	%o2, [%o5 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x80000000
327	WRITE_PAUSE
	/* Read back pending to make sure the clear has posted. */
328	ld	[%o5 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
329	WRITE_PAUSE
330	or	%l0, PSR_PIL, %l4
331	wr	%l4, 0x0, %psr
332	WRITE_PAUSE
333	wr	%l4, PSR_ET, %psr
334	WRITE_PAUSE
335	call	smp4m_cross_call_irq
336	 nop
337	b	ret_trap_lockless_ipi
338	 clr	%l6
339
340	.globl	smp4d_ticker
341	/* SMP per-cpu ticker interrupts are handled specially. */
/* sun4d per-cpu timer tick: ack PIL 14 in the MXCC interrupt-clear
 * register, then run the C timer handler with traps enabled and
 * interrupts masked at PIL.
 */
342smp4d_ticker:
343	SAVE_ALL
344	or	%l0, PSR_PIL, %g2
345	sethi	%hi(CC_ICLR), %o0
346	sethi	%hi(1 << 14), %o1
347	or	%o0, %lo(CC_ICLR), %o0
348	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
349	wr	%g2, 0x0, %psr
350	WRITE_PAUSE
351	wr	%g2, PSR_ET, %psr
352	WRITE_PAUSE
353	call	smp4d_percpu_timer_interrupt
354	 add	%sp, STACKFRAME_SZ, %o0
355	wr	%l0, PSR_ET, %psr
356	WRITE_PAUSE
357	RESTORE_ALL
358
359	.align	4
360	.globl	linux_trap_ipi15_sun4d
/* Level-15 entry on sun4d/SMP.  First rule out hardware error sources
 * (MXCC error register, then boot-bus status); if clean, ack PIL 15 in
 * the MXCC ICLR and run the cross-call IPI handler.  On error, mask
 * PIL 15 and hang (see FIXME below).
 */
361linux_trap_ipi15_sun4d:
362	SAVE_ALL
363	sethi	%hi(CC_BASE), %o4
364	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
365	or	%o4, (CC_EREG - CC_BASE), %o0
366	ldda	[%o0] ASI_M_MXCC, %o0
367	andcc	%o0, %o2, %g0
368	bne	1f
369	 sethi	%hi(BB_STAT2), %o2
370	lduba	[%o2] ASI_M_CTL, %o2
371	andcc	%o2, BB_STAT2_MASK, %g0
372	bne	2f
373	 or	%o4, (CC_ICLR - CC_BASE), %o0
374	sethi	%hi(1 << 15), %o1
375	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
376	or	%l0, PSR_PIL, %l4
377	wr	%l4, 0x0, %psr
378	WRITE_PAUSE
379	wr	%l4, PSR_ET, %psr
380	WRITE_PAUSE
381	call	smp4d_cross_call_irq
382	 nop
383	b	ret_trap_lockless_ipi
384	 clr	%l6
385
3861:	/* MXCC error */
3872:	/* BB error */
388	/* Disable PIL 15 */
389	set	CC_IMSK, %l4
390	lduha	[%l4] ASI_M_MXCC, %l5
391	sethi	%hi(1 << 15), %l7
392	or	%l5, %l7, %l5
393	stha	%l5, [%l4] ASI_M_MXCC
394	/* FIXME */
	/* Error path is unimplemented: spin forever with PIL 15 masked. */
3951:	b,a	1b
396
397	.globl	smpleon_ipi
	/* NOTE(review): the .extern below names leon_ipi_interrupt but the
	 * call targets leonsmp_ipi_interrupt -- looks like a stale extern
	 * declaration; confirm against the LEON SMP code.
	 */
398	.extern leon_ipi_interrupt
399	/* SMP per-cpu IPI interrupts are handled specially. */
/* LEON IPI: full frame, mask at PIL, enable traps, call the C handler
 * with a pt_regs pointer, then restore the original %psr.
 */
400smpleon_ipi:
401        SAVE_ALL
402	or	%l0, PSR_PIL, %g2
403	wr	%g2, 0x0, %psr
404	WRITE_PAUSE
405	wr	%g2, PSR_ET, %psr
406	WRITE_PAUSE
407	call	leonsmp_ipi_interrupt
408	 add	%sp, STACKFRAME_SZ, %o1 ! pt_regs
409	wr	%l0, PSR_ET, %psr
410	WRITE_PAUSE
411	RESTORE_ALL
412
413	.align	4
414	.globl	linux_trap_ipi15_leon
/* Level-15 cross-call entry on LEON: mask interrupts, enable traps,
 * run the cross-call handler, and return via the lockless IPI path.
 */
415linux_trap_ipi15_leon:
416	SAVE_ALL
417	or	%l0, PSR_PIL, %l4
418	wr	%l4, 0x0, %psr
419	WRITE_PAUSE
420	wr	%l4, PSR_ET, %psr
421	WRITE_PAUSE
422	call	leon_cross_call_irq
423	 nop
424	b	ret_trap_lockless_ipi
425	 clr	%l6
426
427#endif /* CONFIG_SMP */
428
429	/* This routine handles illegal instructions and privileged
430	 * instruction attempts from user code.
431	 */
432	.align	4
433	.globl	bad_instruction
/* Fetch the faulting instruction from the trap pc (%l1) and test it
 * against opcode pattern 0x81d80000 under mask 0xc1f80000: that matches
 * an (unimplemented) FLUSH, which is simply skipped at 1: below.
 * Everything else becomes a full trap into do_illegal_instruction.
 * NOTE(review): the first instruction of the SAVE_ALL expansion sits in
 * the delay slot of "be 1f" and executes on both paths -- this relies
 * on that instruction being harmless when the branch is taken; confirm
 * against the SAVE_ALL definition in winmacro.h.
 */
434bad_instruction:
435	sethi	%hi(0xc1f80000), %l4
436	ld	[%l1], %l5
437	sethi	%hi(0x81d80000), %l7
438	and	%l5, %l4, %l5
439	cmp	%l5, %l7
440	be	1f
441	SAVE_ALL
442
443	wr	%l0, PSR_ET, %psr		! re-enable traps
444	WRITE_PAUSE
445
446	add	%sp, STACKFRAME_SZ, %o0
447	mov	%l1, %o1
448	mov	%l2, %o2
449	call	do_illegal_instruction
450	 mov	%l0, %o3
451
452	RESTORE_ALL
453
4541:	/* unimplemented flush - just skip */
	/* Resume at npc (%l2), setting the new npc one instruction past it. */
455	jmpl	%l2, %g0
456	 rett	%l2 + 4
457
458	.align	4
459	.globl	priv_instruction
/* Privileged-instruction trap: pass pt_regs plus the trap-time
 * pc (%l1), npc (%l2) and psr (%l0) to the C handler.
 */
460priv_instruction:
461	SAVE_ALL
462
463	wr	%l0, PSR_ET, %psr
464	WRITE_PAUSE
465
466	add	%sp, STACKFRAME_SZ, %o0
467	mov	%l1, %o1
468	mov	%l2, %o2
469	call	do_priv_instruction
470	 mov	%l0, %o3
471
472	RESTORE_ALL
473
474	/* This routine handles unaligned data accesses. */
475	.align	4
476	.globl	mna_handler
/* Memory-not-aligned trap.  PSR_PS (previous supervisor) in the saved
 * %psr selects the kernel or user fixup path; both pass the faulting
 * instruction word (loaded from the trap pc %l1) and a pt_regs pointer.
 */
477mna_handler:
478	andcc	%l0, PSR_PS, %g0
479	be	mna_fromuser
480	 nop
481
482	SAVE_ALL
483
484	wr	%l0, PSR_ET, %psr
485	WRITE_PAUSE
486
487	ld	[%l1], %o1
488	call	kernel_unaligned_trap
489	 add	%sp, STACKFRAME_SZ, %o0
490
491	RESTORE_ALL
492
493mna_fromuser:
494	SAVE_ALL
495
496	wr	%l0, PSR_ET, %psr		! re-enable traps
497	WRITE_PAUSE
498
499	ld	[%l1], %o1
500	call	user_unaligned_trap
501	 add	%sp, STACKFRAME_SZ, %o0
502
503	RESTORE_ALL
504
505	/* This routine handles floating point disabled traps. */
506	.align	4
507	.globl	fpd_trap_handler
/* FPU-disabled trap: hand pt_regs and trap-time pc/npc/psr to C. */
508fpd_trap_handler:
509	SAVE_ALL
510
511	wr	%l0, PSR_ET, %psr		! re-enable traps
512	WRITE_PAUSE
513
514	add	%sp, STACKFRAME_SZ, %o0
515	mov	%l1, %o1
516	mov	%l2, %o2
517	call	do_fpd_trap
518	 mov	%l0, %o3
519
520	RESTORE_ALL
521
522	/* This routine handles Floating Point Exceptions. */
523	.align	4
524	.globl	fpe_trap_handler
/* If the FP exception fired inside fpsave itself (trap pc == fpsave or
 * == fpsave_magic), we must not re-enter the normal path: redirect to
 * the matching recovery stub (fpsave_catch2 / fpsave_catch, see the
 * deadlock note near fpsave below).  Otherwise take the normal C trap.
 */
525fpe_trap_handler:
526	set	fpsave_magic, %l5
527	cmp	%l1, %l5
528	be	1f
529	 sethi	%hi(fpsave), %l5
530	or	%l5, %lo(fpsave), %l5
531	cmp	%l1, %l5
532	bne	2f
533	 sethi	%hi(fpsave_catch2), %l5
534	or	%l5, %lo(fpsave_catch2), %l5
535	wr	%l0, 0x0, %psr
536	WRITE_PAUSE
537	jmp	%l5
538	 rett	%l5 + 4
5391:
540	sethi	%hi(fpsave_catch), %l5
541	or	%l5, %lo(fpsave_catch), %l5
542	wr	%l0, 0x0, %psr
543	WRITE_PAUSE
544	jmp	%l5
545	 rett	%l5 + 4
546
5472:
548	SAVE_ALL
549
550	wr	%l0, PSR_ET, %psr		! re-enable traps
551	WRITE_PAUSE
552
553	add	%sp, STACKFRAME_SZ, %o0
554	mov	%l1, %o1
555	mov	%l2, %o2
556	call	do_fpe_trap
557	 mov	%l0, %o3
558
559	RESTORE_ALL
560
561	/* This routine handles Tag Overflow Exceptions. */
562	.align	4
563	.globl	do_tag_overflow
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
564do_tag_overflow:
565	SAVE_ALL
566
567	wr	%l0, PSR_ET, %psr		! re-enable traps
568	WRITE_PAUSE
569
570	add	%sp, STACKFRAME_SZ, %o0
571	mov	%l1, %o1
572	mov	%l2, %o2
573	call	handle_tag_overflow
574	 mov	%l0, %o3
575
576	RESTORE_ALL
577
578	/* This routine handles Watchpoint Exceptions. */
579	.align	4
580	.globl	do_watchpoint
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
581do_watchpoint:
582	SAVE_ALL
583
584	wr	%l0, PSR_ET, %psr		! re-enable traps
585	WRITE_PAUSE
586
587	add	%sp, STACKFRAME_SZ, %o0
588	mov	%l1, %o1
589	mov	%l2, %o2
590	call	handle_watchpoint
591	 mov	%l0, %o3
592
593	RESTORE_ALL
594
595	/* This routine handles Register Access Exceptions. */
596	.align	4
597	.globl	do_reg_access
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
598do_reg_access:
599	SAVE_ALL
600
601	wr	%l0, PSR_ET, %psr		! re-enable traps
602	WRITE_PAUSE
603
604	add	%sp, STACKFRAME_SZ, %o0
605	mov	%l1, %o1
606	mov	%l2, %o2
607	call	handle_reg_access
608	 mov	%l0, %o3
609
610	RESTORE_ALL
611
612	/* This routine handles Co-Processor Disabled Exceptions. */
613	.align	4
614	.globl	do_cp_disabled
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
615do_cp_disabled:
616	SAVE_ALL
617
618	wr	%l0, PSR_ET, %psr		! re-enable traps
619	WRITE_PAUSE
620
621	add	%sp, STACKFRAME_SZ, %o0
622	mov	%l1, %o1
623	mov	%l2, %o2
624	call	handle_cp_disabled
625	 mov	%l0, %o3
626
627	RESTORE_ALL
628
629	/* This routine handles Co-Processor Exceptions. */
630	.align	4
631	.globl	do_cp_exception
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
632do_cp_exception:
633	SAVE_ALL
634
635	wr	%l0, PSR_ET, %psr		! re-enable traps
636	WRITE_PAUSE
637
638	add	%sp, STACKFRAME_SZ, %o0
639	mov	%l1, %o1
640	mov	%l2, %o2
641	call	handle_cp_exception
642	 mov	%l0, %o3
643
644	RESTORE_ALL
645
646	/* This routine handles Hardware Divide By Zero Exceptions. */
647	.align	4
648	.globl	do_hw_divzero
/* Args to C: pt_regs, trap-time pc (%l1), npc (%l2), psr (%l0). */
649do_hw_divzero:
650	SAVE_ALL
651
652	wr	%l0, PSR_ET, %psr		! re-enable traps
653	WRITE_PAUSE
654
655	add	%sp, STACKFRAME_SZ, %o0
656	mov	%l1, %o1
657	mov	%l2, %o2
658	call	handle_hw_divzero
659	 mov	%l0, %o3
660
661	RESTORE_ALL
662
663	.align	4
664	.globl	do_flush_windows
/* Flush-windows software trap.  User mode goes through the C helper
 * flush_user_windows(); kernel mode (PSR_PS set) uses the inline window
 * walk at flush_patch_one (a boot-time patch point).  Both paths then
 * step the saved pc/npc past the trap instruction.
 */
665do_flush_windows:
666	SAVE_ALL
667
668	wr	%l0, PSR_ET, %psr
669	WRITE_PAUSE
670
671	andcc	%l0, PSR_PS, %g0
672	bne	dfw_kernel
673	 nop
674
675	call	flush_user_windows
676	 nop
677
678	/* Advance over the trap instruction. */
679	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
680	add	%l1, 0x4, %l2
681	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
682	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
683
684	RESTORE_ALL
685
686	.globl	flush_patch_one
687
688	/* We get these for debugging routines using __builtin_return_address() */
689dfw_kernel:
690flush_patch_one:
691	FLUSH_ALL_KERNEL_WINDOWS
692
693	/* Advance over the trap instruction. */
694	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
695	add	%l1, 0x4, %l2
696	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
697	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
698
699	RESTORE_ALL
700
701	/* The getcc software trap.  The user wants the condition codes from
702	 * the %psr in register %g1.
703	 */
704
705	.align	4
706	.globl	getcc_trap_handler
/* Extract the four ICC bits (%psr[23:20]) into %g1 and return,
 * skipping the trap instruction (resume at npc, npc+4).
 */
707getcc_trap_handler:
708	srl	%l0, 20, %g1	! give user
709	and	%g1, 0xf, %g1	! only ICC bits in %psr
710	jmp	%l2		! advance over trap instruction
711	rett	%l2 + 0x4	! like this...
712
713	/* The setcc software trap.  The user has condition codes in %g1
714	 * that it would like placed in the %psr.  Be careful not to flip
715	 * any unintentional bits!
716	 */
717
718	.align	4
719	.globl	setcc_trap_handler
/* Merge only the ICC field of the user-supplied value (%g1 << 20)
 * into the saved %psr, write it back, and skip the trap instruction.
 */
720setcc_trap_handler:
721	sll	%g1, 0x14, %l4
722	set	PSR_ICC, %l5
723	andn	%l0, %l5, %l0	! clear ICC bits in %psr
724	and	%l4, %l5, %l4	! clear non-ICC bits in user value
725	or	%l4, %l0, %l4	! or them in... mix mix mix
726
727	wr	%l4, 0x0, %psr	! set new %psr
728	WRITE_PAUSE		! TI scumbags...
729
730	jmp	%l2		! advance over trap instruction
731	rett	%l2 + 0x4	! like this...
732
/* Async memory error at level 15 on sun4m: mask the NMI source in the
 * global interrupt mask, call sun4m_nmi(), then unmask and return.
 * The dummy pending-register reads flush the mask writes to hardware.
 */
733sun4m_nmi_error:
734	/* NMI async memory error handling. */
735	sethi	%hi(0x80000000), %l4
736	sethi	%hi(sun4m_irq_global), %o5
737	ld	[%o5 + %lo(sun4m_irq_global)], %l5
738	st	%l4, [%l5 + 0x0c]	! sun4m_irq_global->mask_set=0x80000000
739	WRITE_PAUSE
740	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
741	WRITE_PAUSE
742	or	%l0, PSR_PIL, %l4
743	wr	%l4, 0x0, %psr
744	WRITE_PAUSE
745	wr	%l4, PSR_ET, %psr
746	WRITE_PAUSE
747	call	sun4m_nmi
748	 nop
749	st	%l4, [%l5 + 0x08]	! sun4m_irq_global->mask_clear=0x80000000
750	WRITE_PAUSE
751	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
752	WRITE_PAUSE
753	RESTORE_ALL
754
755#ifndef CONFIG_SMP
756	.align	4
757	.globl	linux_trap_ipi15_sun4m
/* On UP there are no cross-call IPIs, so a level-15 trap can only be
 * an NMI async memory error.
 */
758linux_trap_ipi15_sun4m:
759	SAVE_ALL
760
761	ba	sun4m_nmi_error
762	 nop
763#endif /* CONFIG_SMP */
764
765	.align	4
766	.globl	srmmu_fault
/* SRMMU fault entry.  Read the fault address register (SFAR, 0x400)
 * and then the fault status register (SFSR, 0x300) via the MMU ASI
 * (LEON_PI/SUN_PI_ select the ASI per platform at boot).  The page
 * address, write bit (from the SFSR) and text-fault bit (assumed
 * pre-set in %l7 by the trap-table stub -- confirm in head.S) are
 * packed into %l7 and unpacked into do_sparc_fault()'s arguments.
 */
767srmmu_fault:
768	mov	0x400, %l5
769	mov	0x300, %l4
770
771LEON_PI(lda	[%l5] ASI_LEON_MMUREGS, %l6)	! read sfar first
772SUN_PI_(lda	[%l5] ASI_M_MMUREGS, %l6)	! read sfar first
773
774LEON_PI(lda	[%l4] ASI_LEON_MMUREGS, %l5)	! read sfsr last
775SUN_PI_(lda	[%l4] ASI_M_MMUREGS, %l5)	! read sfsr last
776
777	andn	%l6, 0xfff, %l6
778	srl	%l5, 6, %l5			! and encode all info into l7
779
780	and	%l5, 2, %l5
781	or	%l5, %l6, %l6
782
783	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]
784
785	SAVE_ALL
786
787	mov	%l7, %o1
788	mov	%l7, %o2
789	and	%o1, 1, %o1		! arg2 = text_faultp
790	mov	%l7, %o3
791	and	%o2, 2, %o2		! arg3 = writep
792	andn	%o3, 0xfff, %o3		! arg4 = faulting address
793
794	wr	%l0, PSR_ET, %psr
795	WRITE_PAUSE
796
797	call	do_sparc_fault
798	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
799
800	RESTORE_ALL
801
802	.align	4
803	.globl	sys_nis_syscall
/* Tail-call shim: stash the return address, call the C implementation
 * with a pt_regs pointer, and restore %o7 in the delay slot so the C
 * function returns straight to the syscall dispatcher.
 */
804sys_nis_syscall:
805	mov	%o7, %l5
806	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
807	call	c_sys_nis_syscall
808	 mov	%l5, %o7
809
/* SunOS execv(): same as execve with a NULL envp (%i2 cleared). */
810sunos_execv:
811	.globl	sunos_execv
812	b	sys_execve
813	 clr	%i2
814
815	.align	4
816	.globl	sys_sparc_pipe
/* Tail-call shim into sparc_pipe(regs); %o7 restored in the delay slot
 * so sparc_pipe returns directly to the syscall dispatcher.
 */
817sys_sparc_pipe:
818	mov	%o7, %l5
819	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
820	call	sparc_pipe
821	 mov	%l5, %o7
822
823	.align	4
824	.globl	sys_sigstack
/* Tail-call shim into do_sys_sigstack(..., sp=%fp); same %o7 trick. */
825sys_sigstack:
826	mov	%o7, %l5
827	mov	%fp, %o2
828	call	do_sys_sigstack
829	 mov	%l5, %o7
830
831	.align	4
832	.globl	sys_sigreturn
/* sigreturn(2): rebuild user state via do_sigreturn(regs); if the task
 * is being syscall-traced, notify the tracer before returning.
 * NOTE(review): unlike sys_rt_sigreturn below, %o0 is not reloaded with
 * the pt_regs pointer before the syscall_trace call here -- it still
 * holds do_sigreturn's return value; confirm this is intended.
 */
833sys_sigreturn:
834	call	do_sigreturn
835	 add	%sp, STACKFRAME_SZ, %o0
836
837	ld	[%curptr + TI_FLAGS], %l5
838	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
839	be	1f
840	 nop
841
842	call	syscall_trace
843	 mov	1, %o1
844
8451:
846	/* We don't want to muck with user registers like a
847	 * normal syscall, just return.
848	 */
849	RESTORE_ALL
850
851	.align	4
852	.globl	sys_rt_sigreturn
/* rt_sigreturn(2): restore state via do_rt_sigreturn(regs), notify a
 * tracer if _TIF_SYSCALL_TRACE is set, then return without the normal
 * syscall result handling.
 */
853sys_rt_sigreturn:
854	call	do_rt_sigreturn
855	 add	%sp, STACKFRAME_SZ, %o0
856
857	ld	[%curptr + TI_FLAGS], %l5
858	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
859	be	1f
860	 nop
861
862	add	%sp, STACKFRAME_SZ, %o0
863	call	syscall_trace
864	 mov	1, %o1
865
8661:
867	/* We are returning to a signal handler. */
868	RESTORE_ALL
869
870	/* Now that we have a real sys_clone, sys_fork() is
871	 * implemented in terms of it.  Our _real_ implementation
872	 * of SunOS vfork() will use sys_vfork().
873	 *
874	 * XXX These three should be consolidated into mostly shared
875	 * XXX code just like on sparc64... -DaveM
876	 */
877	.align	4
878	.globl	sys_fork, flush_patch_two
/* fork(2): flush windows (flush_patch_two is a boot-time patch point),
 * record the fork-time %psr/%wim pair in the thread struct, then call
 * sparc_do_fork(SIGCHLD, parent_sp, regs, 0).  %o7 is restored in the
 * delay slot so sparc_do_fork returns directly to the dispatcher.
 */
879sys_fork:
880	mov	%o7, %l5
881flush_patch_two:
882	FLUSH_ALL_KERNEL_WINDOWS;
883	ld	[%curptr + TI_TASK], %o4
884	rd	%psr, %g4
885	WRITE_PAUSE
886	mov	SIGCHLD, %o0			! arg0:	clone flags
887	rd	%wim, %g5
888	WRITE_PAUSE
889	mov	%fp, %o1			! arg1:	usp
	/* std stores the %g4/%g5 (psr/wim) pair as one 64-bit unit. */
890	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
891	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
892	mov	0, %o3
893	call	sparc_do_fork
894	 mov	%l5, %o7
895
896	/* Whee, kernel threads! */
897	.globl	sys_clone, flush_patch_three
/* clone(2): like sys_fork but flags/usp arrive in %o0/%o1.  A NULL
 * new_usp means "use the caller's stack"; otherwise the new usp is
 * aligned down to 8 bytes.
 */
898sys_clone:
899	mov	%o7, %l5
900flush_patch_three:
901	FLUSH_ALL_KERNEL_WINDOWS;
902	ld	[%curptr + TI_TASK], %o4
903	rd	%psr, %g4
904	WRITE_PAUSE
905
906	/* arg0,1: flags,usp  -- loaded already */
907	cmp	%o1, 0x0			! Is new_usp NULL?
908	rd	%wim, %g5
909	WRITE_PAUSE
910	be,a	1f
911	 mov	%fp, %o1			! yes, use callers usp
912	andn	%o1, 7, %o1			! no, align to 8 bytes
9131:
914	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
915	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
916	mov	0, %o3
917	call	sparc_do_fork
918	 mov	%l5, %o7
919
920	/* Whee, real vfork! */
921	.globl	sys_vfork, flush_patch_four
/* vfork(2): flags are CLONE_VFORK(0x4000) | CLONE_VM(0x0100) | SIGCHLD.
 * Ends with a jmpl tail-call into sparc_do_fork (the original %o7 is
 * untouched, so sparc_do_fork returns straight to the dispatcher).
 */
922sys_vfork:
923flush_patch_four:
924	FLUSH_ALL_KERNEL_WINDOWS;
925	ld	[%curptr + TI_TASK], %o4
926	rd	%psr, %g4
927	WRITE_PAUSE
928	rd	%wim, %g5
929	WRITE_PAUSE
930	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
931	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
932	mov	%fp, %o1
933	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
934	sethi	%hi(sparc_do_fork), %l1
935	mov	0, %o3
936	jmpl	%l1 + %lo(sparc_do_fork), %g0
937	 add	%sp, STACKFRAME_SZ, %o2
938
939        .align  4
/* Out-of-range syscall number: route through do_syscall with the
 * sys_ni_syscall entry in %l7 (the slot the dispatcher calls).
 */
940linux_sparc_ni_syscall:
941	sethi   %hi(sys_ni_syscall), %l7
942	b       do_syscall
943	 or     %l7, %lo(sys_ni_syscall), %l7
944
/* Syscall entry under _TIF_SYSCALL_TRACE.  Notify the tracer first; a
 * nonzero return aborts the syscall with -ENOSYS.  Because the tracer
 * may rewrite registers, reload %g1 and %i0-%i5 from the saved frame,
 * re-validate the syscall number, fetch the handler, and rejoin the
 * dispatcher at label 2 (in do_syscall below).
 */
945linux_syscall_trace:
946	add	%sp, STACKFRAME_SZ, %o0
947	call	syscall_trace
948	 mov	0, %o1
949	cmp	%o0, 0
950	bne	3f
951	 mov	-ENOSYS, %o0
952
953	/* Syscall tracing can modify the registers.  */
954	ld	[%sp + STACKFRAME_SZ + PT_G1], %g1
955	sethi	%hi(sys_call_table), %l7
956	ld	[%sp + STACKFRAME_SZ + PT_I0], %i0
957	or	%l7, %lo(sys_call_table), %l7
958	ld	[%sp + STACKFRAME_SZ + PT_I1], %i1
959	ld	[%sp + STACKFRAME_SZ + PT_I2], %i2
960	ld	[%sp + STACKFRAME_SZ + PT_I3], %i3
961	ld	[%sp + STACKFRAME_SZ + PT_I4], %i4
962	ld	[%sp + STACKFRAME_SZ + PT_I5], %i5
963	cmp	%g1, NR_syscalls
964	bgeu	3f
965	 mov	-ENOSYS, %o0
966
967	sll	%g1, 2, %l4
968	mov	%i0, %o0
969	ld	[%l7 + %l4], %l7
970	mov	%i1, %o1
971	mov	%i2, %o2
972	mov	%i3, %o3
973	b	2f
974	 mov	%i4, %o4
975
976	.globl	ret_from_fork
/* First code run by a new user child: finish the context switch via
 * schedule_tail(prev) (%g3 holds prev's thread_info), then return the
 * saved %i0 through the normal syscall-exit path.
 */
977ret_from_fork:
978	call	schedule_tail
979	 ld	[%g3 + TI_TASK], %o0
980	b	ret_sys_call
981	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
982
983	.globl	ret_from_kernel_thread
/* First code run by a new kernel thread: after schedule_tail, call the
 * thread function (saved in PT_G1) with its argument (PT_G2).  If it
 * returns, patch the current window pointer (CWP) field of the saved
 * %psr so RESTORE_ALL sees a consistent window, and exit via
 * ret_sys_call with a return value of 0.
 */
984ret_from_kernel_thread:
985	call	schedule_tail
986	 ld	[%g3 + TI_TASK], %o0
987	ld	[%sp + STACKFRAME_SZ + PT_G1], %l0
988	call	%l0
989	 ld	[%sp + STACKFRAME_SZ + PT_G2], %o0
990	rd	%psr, %l1
991	ld	[%sp + STACKFRAME_SZ + PT_PSR], %l0
992	andn	%l0, PSR_CWP, %l0
993	nop
994	and	%l1, PSR_CWP, %l1
995	or	%l0, %l1, %l0
996	st	%l0, [%sp + STACKFRAME_SZ + PT_PSR]
997	b	ret_sys_call
998	 mov	0, %o0
999
1000	/* Linux native system calls enter here... */
1001	.align	4
1002	.globl	linux_sparc_syscall
/* Syscall trap entry: mark PSR_SYSCALL in the saved %psr, bounds-check
 * the syscall number in %g1 against NR_syscalls, and load the handler
 * address from the table (base pre-loaded in %l7 by the trap stub).
 */
1003linux_sparc_syscall:
1004	sethi	%hi(PSR_SYSCALL), %l4
1005	or	%l0, %l4, %l0
1006	/* Direct access to user regs, must faster. */
1007	cmp	%g1, NR_syscalls
1008	bgeu	linux_sparc_ni_syscall
1009	 sll	%g1, 2, %l4
1010	ld	[%l7 + %l4], %l7
1011
/* Build the trap frame, enable traps, marshal the user's %i0-%i5 into
 * %o0-%o5 and call the handler in %l7; detour through
 * linux_syscall_trace first if _TIF_SYSCALL_TRACE is set.
 */
1012do_syscall:
1013	SAVE_ALL_HEAD
1014	 rd	%wim, %l3
1015
1016	wr	%l0, PSR_ET, %psr
1017	mov	%i0, %o0
1018	mov	%i1, %o1
1019	mov	%i2, %o2
1020
1021	ld	[%curptr + TI_FLAGS], %l5
1022	mov	%i3, %o3
1023	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1024	mov	%i4, %o4
1025	bne	linux_syscall_trace
1026	 mov	%i0, %l5
10272:
1028	call	%l7
1029	 mov	%i5, %o5
1030
10313:
1032	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1033
/* Common syscall exit.  The unsigned compare against
 * -ERESTART_RESTARTBLOCK classifies %o0: values in the errno range set
 * the carry bit in the saved %psr and store abs(errno) in PT_I0;
 * success clears carry.  %l6 records error(1)/success(0) across the
 * tracer notification.  Both paths then step pc/npc past the trap
 * instruction and leave via ret_trap_entry.
 */
1034ret_sys_call:
1035	ld	[%curptr + TI_FLAGS], %l6
1036	cmp	%o0, -ERESTART_RESTARTBLOCK
1037	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
1038	set	PSR_C, %g2
1039	bgeu	1f
1040	 andcc	%l6, _TIF_SYSCALL_TRACE, %g0
1041
1042	/* System call success, clear Carry condition code. */
1043	andn	%g3, %g2, %g3
1044	clr	%l6
1045	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1046	bne	linux_syscall_trace2
1047	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1048	add	%l1, 0x4, %l2			/* npc = npc+4 */
1049	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1050	b	ret_trap_entry
1051	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
10521:
1053	/* System call failure, set Carry condition code.
1054	 * Also, get abs(errno) to return to the process.
1055	 */
1056	sub	%g0, %o0, %o0
1057	or	%g3, %g2, %g3
1058	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1059	mov	1, %l6
1060	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1061	bne	linux_syscall_trace2
1062	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1063	add	%l1, 0x4, %l2			/* npc = npc+4 */
1064	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1065	b	ret_trap_entry
1066	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1067
/* Syscall-exit tracer notification, then the same pc/npc advance. */
1068linux_syscall_trace2:
1069	add	%sp, STACKFRAME_SZ, %o0
1070	mov	1, %o1
1071	call	syscall_trace
1072	 add	%l1, 0x4, %l2			/* npc = npc+4 */
1073	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1074	b	ret_trap_entry
1075	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1076
1077
1078/* Saving and restoring the FPU state is best done from lowlevel code.
1079 *
1080 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1081 *             void *fpqueue, unsigned long *fpqdepth)
1082 */
1083
1084	.globl	fpsave
/* Store the FSR (may itself trap -- see fpe_trap_handler and the
 * fpsave_catch stubs below), drain the FP queue while the 0x2000 (qne)
 * bit stays set, record the queue depth, then dump %f0-%f30.
 * %g2 counts queue entries saved.
 */
1085fpsave:
1086	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
1087	ld	[%o1], %g1
1088	set	0x2000, %g4
1089	andcc	%g1, %g4, %g0
1090	be	2f
1091	 mov	0, %g2
1092
1093	/* We have an fpqueue to save. */
10941:
1095	std	%fq, [%o2]
1096fpsave_magic:
1097	st	%fsr, [%o1]
1098	ld	[%o1], %g3
1099	andcc	%g3, %g4, %g0
1100	add	%g2, 1, %g2
1101	bne	1b
1102	 add	%o2, 8, %o2
1103
11042:
1105	st	%g2, [%o3]
1106
1107	std	%f0, [%o0 + 0x00]
1108	std	%f2, [%o0 + 0x08]
1109	std	%f4, [%o0 + 0x10]
1110	std	%f6, [%o0 + 0x18]
1111	std	%f8, [%o0 + 0x20]
1112	std	%f10, [%o0 + 0x28]
1113	std	%f12, [%o0 + 0x30]
1114	std	%f14, [%o0 + 0x38]
1115	std	%f16, [%o0 + 0x40]
1116	std	%f18, [%o0 + 0x48]
1117	std	%f20, [%o0 + 0x50]
1118	std	%f22, [%o0 + 0x58]
1119	std	%f24, [%o0 + 0x60]
1120	std	%f26, [%o0 + 0x68]
1121	std	%f28, [%o0 + 0x70]
1122	retl
1123	 std	%f30, [%o0 + 0x78]
1124
1125	/* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
1126	 * code for pointing out this possible deadlock, while we save state
1127	 * above we could trap on the fsr store so our low level fpu trap
1128	 * code has to know how to deal with this.
1129	 */
/* Recovery stubs entered from fpe_trap_handler: redo the trapping FSR
 * store, then resume fpsave one instruction past the point that trapped.
 */
1130fpsave_catch:
1131	b	fpsave_magic + 4
1132	 st	%fsr, [%o1]
1133
1134fpsave_catch2:
1135	b	fpsave + 4
1136	 st	%fsr, [%o1]
1137
1138	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1139
1140	.globl	fpload
/* Reload %f0-%f30 from the save area, then the FSR. */
1141fpload:
1142	ldd	[%o0 + 0x00], %f0
1143	ldd	[%o0 + 0x08], %f2
1144	ldd	[%o0 + 0x10], %f4
1145	ldd	[%o0 + 0x18], %f6
1146	ldd	[%o0 + 0x20], %f8
1147	ldd	[%o0 + 0x28], %f10
1148	ldd	[%o0 + 0x30], %f12
1149	ldd	[%o0 + 0x38], %f14
1150	ldd	[%o0 + 0x40], %f16
1151	ldd	[%o0 + 0x48], %f18
1152	ldd	[%o0 + 0x50], %f20
1153	ldd	[%o0 + 0x58], %f22
1154	ldd	[%o0 + 0x60], %f24
1155	ldd	[%o0 + 0x68], %f26
1156	ldd	[%o0 + 0x70], %f28
1157	ldd	[%o0 + 0x78], %f30
1158	ld	[%o1], %fsr
1159	retl
1160	 nop
1161
1162	/* __ndelay and __udelay take two arguments:
1163	 * 0 - nsecs or usecs to delay
1164	 * 1 - per_cpu udelay_val (loops per jiffy)
1165	 *
1166	 * Note that ndelay gives HZ times higher resolution but has a 10ms
1167	 * limit.  udelay can handle up to 1s.
1168	 */
1169	.globl	__ndelay
/* Scale nsecs to a loop count via two umul/rd %y (upper-32) steps, then
 * fall into the shared spin loop at delay_continue.
 */
1170__ndelay:
1171	save	%sp, -STACKFRAME_SZ, %sp
1172	mov	%i0, %o0		! round multiplier up so large ns ok
1173	mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
1174	umul	%o0, %o1, %o0
1175	rd	%y, %o1
1176	mov	%i1, %o1		! udelay_val
1177	umul	%o0, %o1, %o0
1178	rd	%y, %o1
1179	ba	delay_continue
1180	 mov	%o1, %o0		! >>32 later for better resolution
1181
1182	.globl	__udelay
/* Scale usecs to a loop count: usecs * (2^32/1e6) * udelay_val, with a
 * rounding constant added (carry propagated into the high word), then
 * multiply by HZ and spin-count down in delay_continue.
 */
1183__udelay:
1184	save	%sp, -STACKFRAME_SZ, %sp
1185	mov	%i0, %o0
1186	sethi	%hi(0x10c7), %o1	! round multiplier up so large us ok
1187	or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
1188	umul	%o0, %o1, %o0
1189	rd	%y, %o1
1190	mov	%i1, %o1		! udelay_val
1191	umul	%o0, %o1, %o0
1192	rd	%y, %o1
1193	sethi	%hi(0x028f4b62), %l0	! Add in rounding constant * 2**32,
1194	or	%g0, %lo(0x028f4b62), %l0
1195	addcc	%o0, %l0, %o0		! 2**32 * 0.009 999
1196	bcs,a	3f
1197	 add	%o1, 0x01, %o1
11983:
1199	mov	HZ, %o0			! >>32 earlier for wider range
1200	umul	%o0, %o1, %o0
1201	rd	%y, %o1
1202
/* Busy-wait: decrement %o0 to zero (the bne tests the previous
 * iteration's subcc flags, delay-slot style).
 */
1203delay_continue:
1204	cmp	%o0, 0x0
12051:
1206	bne	1b
1207	 subcc	%o0, 1, %o0
1208
1209	ret
1210	restore
1211EXPORT_SYMBOL(__udelay)
1212EXPORT_SYMBOL(__ndelay)
1213
1214	/* Handle a software breakpoint */
1215	/* We have to inform parent that child has stopped */
1216	.align 4
1217	.globl breakpoint_trap
/* Software breakpoint: build a frame, re-enable traps, preserve the
 * original %i0 in PT_G0 (so a tracer can restart the syscall), and
 * call sparc_breakpoint(regs).
 */
1218breakpoint_trap:
1219	rd	%wim,%l3
1220	SAVE_ALL
1221	wr 	%l0, PSR_ET, %psr
1222	WRITE_PAUSE
1223
1224	st	%i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1225	call	sparc_breakpoint
1226	 add	%sp, STACKFRAME_SZ, %o0
1227
1228	RESTORE_ALL
1229
1230#ifdef CONFIG_KGDB
/* KGDB trap entry (see arch_kgdb_breakpoint above): full frame, traps
 * re-enabled, then kgdb_trap(trap_level, regs).
 */
1231	ENTRY(kgdb_trap_low)
1232	rd	%wim,%l3
1233	SAVE_ALL
1234	wr 	%l0, PSR_ET, %psr
1235	WRITE_PAUSE
1236
1237	mov	%l7, %o0		! trap_level
1238	call	kgdb_trap
1239	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
1240
1241	RESTORE_ALL
1242	ENDPROC(kgdb_trap_low)
1243#endif
1244
1245	.align	4
1246	.globl	flush_patch_exception
/* Patched-in window flush for the uaccess fixup path: flush windows,
 * reload %o6/%o7 as a pair from [%o0], signal EFAULT in %g1, and jump
 * to the fixup return point (%o7 + 0xc -- see asm-sparc/processor.h
 * for the contract; the offset is defined there, not here).
 */
1247flush_patch_exception:
1248	FLUSH_ALL_KERNEL_WINDOWS;
1249	ldd	[%o0], %o6
1250	jmpl	%o7 + 0xc, %g0			! see asm-sparc/processor.h
1251	 mov	1, %g1				! signal EFAULT condition
1252
1253	.align	4
1254	.globl	kill_user_windows, kuw_patch1_7win
1255	.globl	kuw_patch1
/* 7-window variant of the wim-rotation step below; boot code patches it
 * over kuw_patch1 on cpus with 7 register windows.
 */
1256kuw_patch1_7win:	sll	%o3, 6, %o3
1257
1258	/* No matter how much overhead this routine has in the worst
1259	 * case scenario, it is several times better than taking the
1260	 * traps with the old method of just doing flush_user_windows().
1261	 */
/* Discard all user windows by simulating saves against %wim and
 * clearing TI_UWINMASK, instead of spilling them to memory.  Runs with
 * interrupts masked at PIL so the mask cannot change underneath us.
 */
1262kill_user_windows:
1263	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
1264	orcc	%g0, %o0, %g0			! if no bits set, we are done
1265	be	3f				! nothing to do
1266	 rd	%psr, %o5			! must clear interrupts
1267	or	%o5, PSR_PIL, %o4		! or else that could change
1268	wr	%o4, 0x0, %psr			! the uwinmask state
1269	WRITE_PAUSE				! burn them cycles
12701:
1271	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
1272	orcc	%g0, %o0, %g0			! did an interrupt come in?
1273	be	4f				! yep, we are done
1274	 rd	%wim, %o3			! get current wim
1275	srl	%o3, 1, %o4			! simulate a save
1276kuw_patch1:
1277	sll	%o3, 7, %o3			! compute next wim
1278	or	%o4, %o3, %o3			! result
1279	andncc	%o0, %o3, %o0			! clean this bit in umask
1280	bne	kuw_patch1			! not done yet
1281	 srl	%o3, 1, %o4			! begin another save simulation
1282	wr	%o3, 0x0, %wim			! set the new wim
1283	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
12844:
1285	wr	%o5, 0x0, %psr			! re-enable interrupts
1286	WRITE_PAUSE				! burn baby burn
12873:
1288	retl					! return
1289	 st	%g0, [%g6 + TI_W_SAVED]		! no windows saved
1290
1291	.align	4
1292	.globl	restore_current
/* Reload the current thread_info pointer into %g6 (curptr). */
1293restore_current:
1294	LOAD_CURRENT(g6, o0)
1295	retl
1296	 nop
1297
1298#ifdef CONFIG_PCIC_PCI
1299#include <asm/pcic.h>
1300
1301	.align	4
1302	.globl	linux_trap_ipi15_pcic
/* Level-15 NMI entry when the PCIC PCI controller is the source:
 * deactivate the NMI first (busy-waiting until PIO/PCI pending bits
 * clear), then call pcic_nmi(pending, regs).
 */
1303linux_trap_ipi15_pcic:
1304	rd	%wim, %l3
1305	SAVE_ALL
1306
1307	/*
1308	 * First deactivate NMI
1309	 * or we cannot drop ET, cannot get window spill traps.
1310	 * The busy loop is necessary because the PIO error
1311	 * sometimes does not go away quickly and we trap again.
1312	 */
1313	sethi	%hi(pcic_regs), %o1
1314	ld	[%o1 + %lo(pcic_regs)], %o2
1315
1316	! Get pending status for printouts later.
1317	ld	[%o2 + PCI_SYS_INT_PENDING], %o0
1318
1319	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1320	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
13211:
1322	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
1323	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1324	bne	1b
1325	 nop
1326
1327	or	%l0, PSR_PIL, %l4
1328	wr	%l4, 0x0, %psr
1329	WRITE_PAUSE
1330	wr	%l4, PSR_ET, %psr
1331	WRITE_PAUSE
1332
1333	call	pcic_nmi
1334	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
1335	RESTORE_ALL
1336
/* Instruction sequence copied over the level-15 trap-table slot at
 * boot to vector NMIs here (the trap stub saves %psr in %l0).
 */
1337	.globl	pcic_nmi_trap_patch
1338pcic_nmi_trap_patch:
1339	sethi	%hi(linux_trap_ipi15_pcic), %l3
1340	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
1341	 rd	%psr, %l0
1342	.word	0
1343
1344#endif /* CONFIG_PCIC_PCI */
1345
1346	.globl	flushw_all
/* Flush all register windows to the stack: seven nested saves force
 * every dirty window to spill, then unwind with six restores plus the
 * final restore in the ret delay slot.
 */
1347flushw_all:
1348	save	%sp, -0x40, %sp
1349	save	%sp, -0x40, %sp
1350	save	%sp, -0x40, %sp
1351	save	%sp, -0x40, %sp
1352	save	%sp, -0x40, %sp
1353	save	%sp, -0x40, %sp
1354	save	%sp, -0x40, %sp
1355	restore
1356	restore
1357	restore
1358	restore
1359	restore
1360	restore
1361	ret
1362	 restore
1363
1364#ifdef CONFIG_SMP
/* Return this cpu's id in %o0.  The default (sun4m) sequence reads
 * %tbr bits [13:12]; the .cpuid_patch section supplies replacement
 * instruction triples that boot code patches in for sun4d (Viking
 * scratch register) and LEON (%asr17 bits [31:28]).
 */
1365ENTRY(hard_smp_processor_id)
1366661:	rd		%tbr, %g1
1367	srl		%g1, 12, %o0
1368	and		%o0, 3, %o0
1369	.section	.cpuid_patch, "ax"
1370	/* Instruction location. */
1371	.word		661b
1372	/* SUN4D implementation. */
1373	lda		[%g0] ASI_M_VIKING_TMP1, %o0
1374	nop
1375	nop
1376	/* LEON implementation. */
1377	rd		%asr17, %o0
1378	srl		%o0, 0x1c, %o0
1379	nop
1380	.previous
1381	retl
1382	 nop
1383ENDPROC(hard_smp_processor_id)
1384#endif
1385
1386/* End of entry.S */
1387