/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *    written by Carsten Langgaard, carstenl@mips.com
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
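/*
 * Put differently: ST_OFF is the offset, relative to the thread_info
 * base, of the saved CP0_Status word inside the struct pt_regs that
 * sits just below the unused 32 bytes at the top of the kernel stack.
 */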

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice, as indicated by _TIF_USEDFPU.  In any case, the CU1 bit in
 * the user-space STATUS register should be 0, so that a process *always*
 * starts its userland with the FPU disabled after each context switch.
 *
 * The FPU will be re-enabled as soon as the process accesses the FPU
 * again, through the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *                     struct thread_info *next_ti)
 */
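/*
 * In short: resume() saves the outgoing thread's CP0_Status, callee-saved
 * registers and return address into prev->thread, saves its FPU context if
 * the FPU was used during this time slice, then switches $28, sp and
 * kernelsp over to the incoming thread and restores its registers.  The
 * previous task pointer is returned in v0.
 */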
	.align	5
	LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
	sw	zero, ll_bit
#endif
	mfc0	t1, CP0_STATUS
	LONG_S	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	LONG_S	ra, THREAD_REG31(a0)

	/*
	 * check if we need to save FPU registers
	 */
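	/*
	 * If _TIF_USEDFPU is set we fall through: clear the flag (the nor
	 * below builds its complement), clear CU1 in the saved user STATUS
	 * so the next time slice starts with the FPU disabled, and save the
	 * FPU registers.
	 */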
	PTR_L	t3, TASK_THREAD_INFO(a0)
	LONG_L	t0, TI_FLAGS(t3)
	li	t1, _TIF_USEDFPU
	and	t2, t0, t1
	beqz	t2, 1f
	nor	t1, zero, t1

	and	t0, t0, t1
	LONG_S	t0, TI_FLAGS(t3)

	/*
	 * clear saved user stack CU1 bit
	 */
	LONG_L	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	LONG_S	t0, ST_OFF(t3)

	fpu_save_double a0 t0 t1		# c0_status passed in t0
						# clobbers t1
1:

	/*
	 * The order of restoring the registers takes care of the race
	 * updating $28, $29 and kernelsp without disabling ints.
	 */
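	/*
	 * A sketch of the reasoning: resume() runs entirely in kernel mode,
	 * and an exception taken in kernel mode keeps using the current sp
	 * rather than kernelsp, so it is safe to switch $28 first, restore
	 * sp and the callee-saved registers next, and only publish the new
	 * kernelsp last.
	 */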
	move	$28, a2
	cpu_restore_nonscratch a1

	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
	set_saved_sp	t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-writes of Status must be atomic on a VPE */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	_ehb
	DMT	8				# dmt	t0
	move	t1,ra
	jal	mips_ihb
	move	ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
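	/*
	 * Build the new CP0_Status: 0xff01 covers the interrupt mask bits
	 * IM7..IM0 and IE, which are kept from the current Status; every
	 * other bit (KSU, EXL, CU bits, ...) is taken from the Status the
	 * next thread saved when it was switched out.
	 */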
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	LONG_L	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
	andi	t0, t0, VPECONTROL_TE
	beqz	t0, 1f
	emt
1:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
#ifdef CONFIG_64BIT
	mfc0	t0, CP0_STATUS
#endif
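	/*
	 * On 64-bit kernels Status is passed in t0 so that (presumably, as
	 * on the resume path above) fpu_save_double can look at the FR bit
	 * and decide whether to save all 32 FP registers or only the 16
	 * even-numbered ones.  _restore_fp below does the same.
	 */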
	fpu_save_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
#ifdef CONFIG_64BIT
	mfc0	t0, CP0_STATUS
#endif
	fpu_restore_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signaling NaNs.  The bit pattern we use has the
 * property that it represents a signaling NaN whether interpreted as
 * single or as double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
	mfc0	t0, CP0_TCSTATUS
	/* Bit position is the same for Status, TCStatus */
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	enable_fpu_hazard
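	/*
	 * enable_fpu_hazard resolves the hazard between the mtc0 above and
	 * the coprocessor 1 instructions below, so the FPU accesses do not
	 * fault before CU1 has actually taken effect.
	 */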

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

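	/*
	 * All ones: the exponent is all ones and the MSB of the fraction is
	 * set, which the legacy MIPS NaN encoding treats as a signaling NaN
	 * whether the word is read as single or as double precision.
	 */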
	li	t1, -1				# SNaN

#ifdef CONFIG_64BIT
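	/*
	 * Status.FR is bit 26; shifting left by 5 moves it into the sign
	 * bit, so the bgez below skips the odd-numbered registers when the
	 * FPU is in 16-double (FR = 0) mode.
	 */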
	sll	t0, t0, 5
	bgez	t0, 1f				# 16 / 32 register mode?

	dmtc1	t1, $f1
	dmtc1	t1, $f3
	dmtc1	t1, $f5
	dmtc1	t1, $f7
	dmtc1	t1, $f9
	dmtc1	t1, $f11
	dmtc1	t1, $f13
	dmtc1	t1, $f15
	dmtc1	t1, $f17
	dmtc1	t1, $f19
	dmtc1	t1, $f21
	dmtc1	t1, $f23
	dmtc1	t1, $f25
	dmtc1	t1, $f27
	dmtc1	t1, $f29
	dmtc1	t1, $f31
1:
#endif

#ifdef CONFIG_CPU_MIPS32
	mtc1	t1, $f0
	mtc1	t1, $f1
	mtc1	t1, $f2
	mtc1	t1, $f3
	mtc1	t1, $f4
	mtc1	t1, $f5
	mtc1	t1, $f6
	mtc1	t1, $f7
	mtc1	t1, $f8
	mtc1	t1, $f9
	mtc1	t1, $f10
	mtc1	t1, $f11
	mtc1	t1, $f12
	mtc1	t1, $f13
	mtc1	t1, $f14
	mtc1	t1, $f15
	mtc1	t1, $f16
	mtc1	t1, $f17
	mtc1	t1, $f18
	mtc1	t1, $f19
	mtc1	t1, $f20
	mtc1	t1, $f21
	mtc1	t1, $f22
	mtc1	t1, $f23
	mtc1	t1, $f24
	mtc1	t1, $f25
	mtc1	t1, $f26
	mtc1	t1, $f27
	mtc1	t1, $f28
	mtc1	t1, $f29
	mtc1	t1, $f30
	mtc1	t1, $f31
#else
	.set	mips3
	dmtc1	t1, $f0
	dmtc1	t1, $f2
	dmtc1	t1, $f4
	dmtc1	t1, $f6
	dmtc1	t1, $f8
	dmtc1	t1, $f10
	dmtc1	t1, $f12
	dmtc1	t1, $f14
	dmtc1	t1, $f16
	dmtc1	t1, $f18
	dmtc1	t1, $f20
	dmtc1	t1, $f22
	dmtc1	t1, $f24
	dmtc1	t1, $f26
	dmtc1	t1, $f28
	dmtc1	t1, $f30
#endif
	jr	ra
	END(_init_fpu)