/* SPDX-License-Identifier: GPL-2.0 */
/*
 * rtrap.S: Return from Sparc trap low-level code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

#define t_psr     l0
#define t_pc      l1
#define t_npc     l2
#define t_wim     l3
#define twin_tmp1 l4
#define glob_tmp  g4
#define curptr    g6

	/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	.globl	rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
	.globl	rtrap_7win_patch4, rtrap_7win_patch5
rtrap_7win_patch1:	srl	%t_wim, 0x6, %glob_tmp
rtrap_7win_patch2:	and	%glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3:	srl	%g1, 7, %g2
rtrap_7win_patch4:	srl	%g2, 6, %g2
rtrap_7win_patch5:	and	%g1, 0x7f, %g1
	/* END OF PATCH INSTRUCTIONS */
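
	/* NOTE: the rtrap_7win_patch* instructions above are presumably
	 * patched over the corresponding rtrap_patch* sites below at boot
	 * on CPUs with only seven register windows, so the window-mask
	 * arithmetic is done modulo 7 windows (shifts of 6/7, 0x7f masks)
	 * instead of modulo 8.
	 */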

	/* We need to check for a few things on return from trap:
	 * 1) The need to call schedule() because this
	 *    process's quantum is up.
	 * 2) Pending work for this process (signals, notify-resume
	 *    requests); if any exists we need to call
	 *    do_notify_resume() to handle it.
	 *
	 * Otherwise we just check whether the rett would land us
	 * in an invalid window; if so we need to pull that window
	 * off the user/kernel stack first.
	 */

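	/* On entry %t_psr already holds the PSR saved in the trap frame
	 * (the trap-return callers load it before branching here) and
	 * %curptr points at the current thread_info.  If PSR_PS is set we
	 * trapped from supervisor mode and take the ret_trap_kernel path;
	 * otherwise we handle a return to user mode.  The PSR_SYSCALL bit
	 * is cleared from the saved PSR in either case.
	 */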
	.globl	ret_trap_entry, rtrap_patch1, rtrap_patch2
	.globl	rtrap_patch3, rtrap_patch4, rtrap_patch5
	.globl	ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
	andcc	%t_psr, PSR_PS, %g0
	sethi	%hi(PSR_SYSCALL), %g1
	be	1f
	 andn	%t_psr, %g1, %t_psr

	wr	%t_psr, 0x0, %psr
	b	ret_trap_kernel
	 nop

1:
	ld	[%curptr + TI_FLAGS], %g2
	andcc	%g2, (_TIF_NEED_RESCHED), %g0
	be	signal_p
	 nop

	call	schedule
	 nop

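	/* Work-pending loop: re-read the thread flags (schedule() may have
	 * changed them) and, while anything in _TIF_DO_NOTIFY_RESUME_MASK
	 * is set, call do_notify_resume(pt_regs, %l6, flags) and loop.
	 * %l6 is presumably the original %i0 saved by the syscall entry
	 * path, used for system call restart handling.
	 */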
	ld	[%curptr + TI_FLAGS], %g2
signal_p:
	andcc	%g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bz,a	ret_trap_continue
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr

	mov	%g2, %o2
	mov	%l6, %o1
	call	do_notify_resume
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs ptr

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

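	/* No work left pending: write back the saved PSR (syscall bit
	 * cleared) and, if the kernel had to buffer user register windows
	 * it could not spill to the user stack (TI_W_SAVED != 0), re-enable
	 * traps and let try_to_clear_window_buffer() push them out.  That
	 * may itself queue a signal, so we go back through signal_p after.
	 */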
ret_trap_continue:
	sethi	%hi(PSR_SYSCALL), %g1
	andn	%t_psr, %g1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	ld	[%curptr + TI_W_SAVED], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	be	ret_trap_nobufwins
	 nop

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	mov	1, %o1
	call	try_to_clear_window_buffer
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
	 * a window from the stack, if necessary.
	 */
	LOAD_PT_INS(sp)

	/* If there are already live user windows in the
	 * set we can return from trap safely.
	 */
	ld	[%curptr + TI_UWINMASK], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	bne	ret_trap_userwins_ok
	 nop

		/* Calculate the new %wim; we have to pull a register
		 * window from the user's stack.
		 */
ret_trap_pull_one_window:
		rd	%wim, %t_wim
		sll	%t_wim, 0x1, %twin_tmp1
rtrap_patch1:	srl	%t_wim, 0x7, %glob_tmp
		or	%glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2:	and	%glob_tmp, 0xff, %glob_tmp

		wr	%glob_tmp, 0x0, %wim
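
	/* The sequence above rotates the 8-bit %wim left by one (7-bit
	 * with the 7-window patches applied); e.g. an old %wim of 0x80
	 * becomes ((0x80 << 1) | (0x80 >> 7)) & 0xff == 0x01.  This marks
	 * a different window invalid so the one we are about to refill
	 * from the user stack can be restored into.
	 */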

	/* Here comes the architecture specific
	 * branch to the user stack checking routine
	 * for return from traps.
	 */
	b	srmmu_rett_stackchk
	 andcc	%fp, 0x7, %g0
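	/* The andcc in the delay slot pre-checks that the user frame
	 * pointer is doubleword aligned; the first bne inside
	 * srmmu_rett_stackchk tests exactly these condition codes.
	 */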

ret_trap_userwins_ok:
	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	or	%t_pc, %t_npc, %g2
	andcc	%g2, 0x3, %g0
	sethi	%hi(PSR_SYSCALL), %g2
	be	1f
	 andn	%t_psr, %g2, %t_psr

	b	ret_trap_unaligned_pc
	 add	%sp, STACKFRAME_SZ, %o0

1:
	LOAD_PT_YREG(sp, g1)
	LOAD_PT_GLOBALS(sp)

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

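	/* The saved PC or nPC was not word aligned: hand the frame to
	 * do_memaccess_unaligned(), which is expected to signal the task,
	 * then go back through the work-pending checks at signal_p.
	 */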
ret_trap_unaligned_pc:
	ld	[%sp + STACKFRAME_SZ + PT_PC], %o1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %o2
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %o3

	wr	%t_wim, 0x0, %wim		! or else...

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	do_memaccess_unaligned
	 nop

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

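	/* Return to kernel mode.  The sll below shifts by %t_psr, and
	 * since a shift only uses the low five bits of its count this is
	 * really a shift by the CWP field: 2 << CWP, wrapped to the window
	 * count by the patched srl/or, is the mask of the window the rett
	 * will restore into.  If %wim has that bit set, that window must
	 * be reloaded from the kernel stack first.
	 */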
ret_trap_kernel:
		/* Will the rett land us in the invalid window? */
		mov	2, %g1
		sll	%g1, %t_psr, %g1
rtrap_patch3:	srl	%g1, 8, %g2
		or	%g1, %g2, %g1
		rd	%wim, %g2
		andcc	%g2, %g1, %g0
		be	1f		! Nope, just return from the trap
		 sll	%g2, 0x1, %g1

		/* We have to grab a window before returning. */
rtrap_patch4:	srl	%g2, 7,  %g2
		or	%g1, %g2, %g1
rtrap_patch5:	and	%g1, 0xff, %g1

	wr	%g1, 0x0, %wim

	/* Grrr, make sure we load from the right %sp... */
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

	restore	%g0, %g0, %g0
	LOAD_WINDOW(sp)
	b	2f
	 save	%g0, %g0, %g0
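
	/* The restore above rotates into the window the rett will return
	 * to, LOAD_WINDOW refills its locals and ins from the stack frame
	 * its %sp points at, and the save in the delay slot steps back so
	 * the common exit sequence at 2: runs from the trap window.
	 */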

	/* Reload the entire frame in case this is from a
	 * kernel system call or whatever...
	 */
1:
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
	sethi	%hi(PSR_SYSCALL), %twin_tmp1
	andn	%t_psr, %twin_tmp1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

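	/* The user stack was misaligned or not mapped, so the window could
	 * not be pulled back in.  Restore the original %wim, turn traps
	 * back on, and let window_ret_fault() deal with it (presumably by
	 * signalling the task) before redoing the work-pending checks.
	 */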
ret_trap_user_stack_is_bolixed:
	wr	%t_wim, 0x0, %wim

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	window_ret_fault
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

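	/* srmmu_rett_stackchk: the condition codes still reflect the
	 * "andcc %fp, 0x7" from the branch delay slot above, and %fp must
	 * also lie below PAGE_OFFSET (i.e. in user space).  We then read
	 * AC_M_SFSR to clear stale fault status, set what appears to be
	 * the no-fault (NF) bit (0x2) in the SRMMU/LEON control register,
	 * pull the user window in with LOAD_WINDOW, clear NF again, and
	 * finally recheck the fault status: if the loads faulted, bail out
	 * to ret_trap_user_stack_is_bolixed.
	 */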
	.globl	srmmu_rett_stackchk
srmmu_rett_stackchk:
	bne	ret_trap_user_stack_is_bolixed
	 sethi   %hi(PAGE_OFFSET), %g1
	cmp	%g1, %fp
	bleu	ret_trap_user_stack_is_bolixed
	 mov	AC_M_SFSR, %g1
LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g0)
SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g0)

LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %g1)
SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %g1)
	or	%g1, 0x2, %g1
LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

	restore	%g0, %g0, %g0

	LOAD_WINDOW(sp)

	save	%g0, %g0, %g0

	andn	%g1, 0x2, %g1
LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

	mov	AC_M_SFAR, %g2
LEON_PI(lda	[%g2] ASI_LEON_MMUREGS, %g2)
SUN_PI_(lda	[%g2] ASI_M_MMUREGS, %g2)

	mov	AC_M_SFSR, %g1
LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g1)
SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g1)
	andcc	%g1, 0x2, %g0
	be	ret_trap_userwins_ok
	 nop

	b,a	ret_trap_user_stack_is_bolixed
266