/*
 * rtrap.S: Return from Sparc trap low-level code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

#define t_psr     l0
#define t_pc      l1
#define t_npc     l2
#define t_wim     l3
#define twin_tmp1 l4
#define glob_tmp  g4
#define curptr    g6

	/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	.globl	rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
	.globl	rtrap_7win_patch4, rtrap_7win_patch5
rtrap_7win_patch1:	srl	%t_wim, 0x6, %glob_tmp
rtrap_7win_patch2:	and	%glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3:	srl	%g1, 7, %g2
rtrap_7win_patch4:	srl	%g2, 6, %g2
rtrap_7win_patch5:	and	%g1, 0x7f, %g1
	/* END OF PATCH INSTRUCTIONS */
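	/* These five opcodes get copied over rtrap_patch1..rtrap_patch5
	 * below by the boot-time setup code when the CPU implements 7
	 * register windows; the defaults below assume the usual 8
	 * (shift by NWINDOWS-1, mask of NWINDOWS set bits).
	 */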

	/* We need to check for a few things which are:
	 * 1) The need to call schedule() because this
	 *    process's quantum is up.
	 * 2) Pending signals for this process; if any
	 *    exist we need to call do_signal() to
	 *    deliver them.
	 *
	 * Otherwise we just check whether the rett would
	 * land us in an invalid window; if so, we need to
	 * pull that window off the user/kernel stack first.
	 */

	.globl	ret_trap_entry, rtrap_patch1, rtrap_patch2
	.globl	rtrap_patch3, rtrap_patch4, rtrap_patch5
	.globl	ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
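	/* PSR_PS set means we trapped from supervisor mode, so take
	 * the kernel return path.  Either way, clear PSR_SYSCALL in
	 * the saved %psr on the way out.
	 */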
	andcc	%t_psr, PSR_PS, %g0
	sethi	%hi(PSR_SYSCALL), %g1
	be	1f
	 andn	%t_psr, %g1, %t_psr

	wr	%t_psr, 0x0, %psr
	b	ret_trap_kernel
	 nop

1:
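	/* Returning to user space: reschedule first if this process's
	 * quantum is up, then fall through to the pending-work checks.
	 */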
	ld	[%curptr + TI_FLAGS], %g2
	andcc	%g2, (_TIF_NEED_RESCHED), %g0
	be	signal_p
	 nop

	call	schedule
	 nop

	ld	[%curptr + TI_FLAGS], %g2
signal_p:
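	/* Loop here until no signal/notify-resume work remains.  %l5
	 * is expected to carry orig_i0 from the syscall path, for
	 * system call restart handling.
	 */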
	andcc	%g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bz,a	ret_trap_continue
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr

	mov	%g2, %o2
	mov	%l5, %o1
	call	do_notify_resume
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs ptr

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_continue:
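	/* If trap entry had to spill user windows into the thread's
	 * save buffer (TI_W_SAVED != 0), flush them to the user stack
	 * with traps enabled, then recheck for pending work.
	 */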
	sethi	%hi(PSR_SYSCALL), %g1
	andn	%t_psr, %g1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	ld	[%curptr + TI_W_SAVED], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	be	ret_trap_nobufwins
	 nop

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	mov	1, %o1
	call	try_to_clear_window_buffer
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
	 * a window from the stack, if necessary.
	 */
	LOAD_PT_INS(sp)

	/* If there are already live user windows in the
	 * register set, we can return from the trap safely.
	 */
	ld	[%curptr + TI_UWINMASK], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	bne	ret_trap_userwins_ok
	 nop

		/* Calculate the new %wim; we have to pull a register
		 * window from the user's stack.
		 */
ret_trap_pull_one_window:
		rd	%wim, %t_wim
		sll	%t_wim, 0x1, %twin_tmp1
rtrap_patch1:	srl	%t_wim, 0x7, %glob_tmp
		or	%glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2:	and	%glob_tmp, 0xff, %glob_tmp
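		/* %glob_tmp now holds the WIM rotated left by one, so
		 * the window we are about to load becomes valid.  The
		 * shift and mask are the 8-window values; the 7-window
		 * variants are patched in above.
		 */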

		wr	%glob_tmp, 0x0, %wim

	/* Here comes the architecture-specific
	 * branch to the user stack checking routine
	 * for return from traps.
	 */
	b	srmmu_rett_stackchk
	 andcc	%fp, 0x7, %g0

ret_trap_userwins_ok:
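	/* Restore the user's %psr/%pc/%npc from the trap frame.  Both
	 * %pc and %npc must be 4-byte aligned, or the rett would fault;
	 * punt misaligned values to ret_trap_unaligned_pc.
	 */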
	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	or	%t_pc, %t_npc, %g2
	andcc	%g2, 0x3, %g0
	sethi	%hi(PSR_SYSCALL), %g2
	be	1f
	 andn	%t_psr, %g2, %t_psr

	b	ret_trap_unaligned_pc
	 add	%sp, STACKFRAME_SZ, %o0

1:
	LOAD_PT_YREG(sp, g1)
	LOAD_PT_GLOBALS(sp)

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_unaligned_pc:
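	/* Report the bogus user %pc/%npc.  %wim is put back first (it
	 * may have been rotated by the pull-one-window path above) so
	 * the window state is sane before traps are re-enabled.
	 */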
	ld	[%sp + STACKFRAME_SZ + PT_PC], %o1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %o2
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %o3

	wr	%t_wim, 0x0, %wim		! or else...

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	do_memaccess_unaligned
	 nop

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_kernel:
		/* Will the rett land us in the invalid window? */
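		/* A register shift count only uses its low five bits,
		 * and CWP lives in PSR bits 4:0, so "sll" by %t_psr
		 * computes 2 << CWP: the mask of the window rett will
		 * restore into (CWP + 1).  rtrap_patch3 folds the
		 * shift back around for the 8-window case.
		 */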
		mov	2, %g1
		sll	%g1, %t_psr, %g1
rtrap_patch3:	srl	%g1, 8, %g2
		or	%g1, %g2, %g1
		rd	%wim, %g2
		andcc	%g2, %g1, %g0
		be	1f		! Nope, just return from the trap
		 sll	%g2, 0x1, %g1

		/* We have to grab a window before returning. */
rtrap_patch4:	srl	%g2, 7,  %g2
		or	%g1, %g2, %g1
rtrap_patch5:	and	%g1, 0xff, %g1

	wr	%g1, 0x0, %wim

	/* Grrr, make sure we load from the right %sp... */
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

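	/* Rotate down into the window rett will enter, refill it from
	 * its stack frame, then climb back up to the trap window.
	 */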
	restore	%g0, %g0, %g0
	LOAD_WINDOW(sp)
	b	2f
	 save	%g0, %g0, %g0

	/* Reload the entire frame in case this is from a
	 * kernel system call or whatever...
	 */
1:
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
	sethi	%hi(PSR_SYSCALL), %twin_tmp1
	andn	%t_psr, %twin_tmp1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_user_stack_is_bolixed:
	wr	%t_wim, 0x0, %wim

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	window_ret_fault
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

	.globl	srmmu_rett_stackchk
srmmu_rett_stackchk:
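	/* The caller's delay slot did "andcc %fp, 0x7, %g0", so the
	 * condition codes here still reflect the user frame pointer's
	 * alignment.  A misaligned %fp, or one pointing at or above
	 * PAGE_OFFSET (i.e. into kernel space), is hopeless.  The
	 * SFSR read clears any stale fault status before we probe
	 * the user stack.
	 */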
	bne	ret_trap_user_stack_is_bolixed
	 sethi   %hi(PAGE_OFFSET), %g1
	cmp	%g1, %fp
	bleu	ret_trap_user_stack_is_bolixed
	 mov	AC_M_SFSR, %g1
	lda	[%g1] ASI_M_MMUREGS, %g0

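	/* Set the no-fault bit in the SRMMU control register so the
	 * window loads below set SFSR/SFAR instead of trapping if the
	 * user stack page is not resident.
	 */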
	lda	[%g0] ASI_M_MMUREGS, %g1
	or	%g1, 0x2, %g1
	sta	%g1, [%g0] ASI_M_MMUREGS

	restore	%g0, %g0, %g0

	LOAD_WINDOW(sp)

	save	%g0, %g0, %g0

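	/* Back in the trap window: turn no-fault mode off again, then
	 * read SFAR/SFSR.  The fault-address-valid bit (0x2) in the
	 * SFSR tells us whether the window loads above faulted.
	 */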
	andn	%g1, 0x2, %g1
	sta	%g1, [%g0] ASI_M_MMUREGS

	mov	AC_M_SFAR, %g2
	lda	[%g2] ASI_M_MMUREGS, %g2

	mov	AC_M_SFSR, %g1
	lda	[%g1] ASI_M_MMUREGS, %g1
	andcc	%g1, 0x2, %g0
	be	ret_trap_userwins_ok
	 nop

	b,a	ret_trap_user_stack_is_bolixed