/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *    written by Carsten Langgaard, carstenl@mips.com
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice, as indicated by _TIF_USEDFPU.  In any case, the CU1 bit of the
 * user space STATUS register should be 0, so that a process *always* starts
 * its userland with the FPU disabled after each context switch.
 *
 * The FPU will be re-enabled as soon as the process accesses the FPU again,
 * through the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *                     struct thread_info *next_ti, int usedfpu)
 */
	.align	5
	LEAF(resume)
	mfc0	t1, CP0_STATUS
	LONG_S	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	LONG_S	ra, THREAD_REG31(a0)

	/*
	 * Check if we need to save FPU registers.
	 */

	beqz	a3, 1f

	PTR_L	t3, TASK_THREAD_INFO(a0)
	/*
	 * Clear the saved user stack CU1 bit.
	 */
	LONG_L	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	LONG_S	t0, ST_OFF(t3)

	fpu_save_double a0 t0 t1		# c0_status passed in t0
						# clobbers t1
1:

	/*
	 * The order of restoring the registers takes care of the race
	 * updating $28, $29 and kernelsp without disabling ints.
	 */
	move	$28, a2
	cpu_restore_nonscratch a1

	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
	set_saved_sp	t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-writes of Status must be atomic on a VPE */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	_ehb
	DMT	8				# dmt	t0
	move	t1, ra
	jal	mips_ihb
	move	ra, t1
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	LONG_L	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
	andi	t0, t0, VPECONTROL_TE
	beqz	t0, 1f
	emt
1:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
#ifdef CONFIG_64BIT
	mfc0	t0, CP0_STATUS
#endif
	fpu_save_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
#ifdef CONFIG_64BIT
	mfc0	t0, CP0_STATUS
#endif
	fpu_restore_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NaNs.  The bit pattern we use has the
 * property that it represents a signalling NaN whether interpreted as
 * single or as double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
	mfc0	t0, CP0_TCSTATUS
	/* Bit position is the same for Status, TCStatus */
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	enable_fpu_hazard

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

	li	t1, -1				# SNaN

#ifdef CONFIG_64BIT
	sll	t0, t0, 5
	bgez	t0, 1f				# 16 / 32 register mode?

	dmtc1	t1, $f1
	dmtc1	t1, $f3
	dmtc1	t1, $f5
	dmtc1	t1, $f7
	dmtc1	t1, $f9
	dmtc1	t1, $f11
	dmtc1	t1, $f13
	dmtc1	t1, $f15
	dmtc1	t1, $f17
	dmtc1	t1, $f19
	dmtc1	t1, $f21
	dmtc1	t1, $f23
	dmtc1	t1, $f25
	dmtc1	t1, $f27
	dmtc1	t1, $f29
	dmtc1	t1, $f31
1:
#endif

#ifdef CONFIG_CPU_MIPS32
	mtc1	t1, $f0
	mtc1	t1, $f1
	mtc1	t1, $f2
	mtc1	t1, $f3
	mtc1	t1, $f4
	mtc1	t1, $f5
	mtc1	t1, $f6
	mtc1	t1, $f7
	mtc1	t1, $f8
	mtc1	t1, $f9
	mtc1	t1, $f10
	mtc1	t1, $f11
	mtc1	t1, $f12
	mtc1	t1, $f13
	mtc1	t1, $f14
	mtc1	t1, $f15
	mtc1	t1, $f16
	mtc1	t1, $f17
	mtc1	t1, $f18
	mtc1	t1, $f19
	mtc1	t1, $f20
	mtc1	t1, $f21
	mtc1	t1, $f22
	mtc1	t1, $f23
	mtc1	t1, $f24
	mtc1	t1, $f25
	mtc1	t1, $f26
	mtc1	t1, $f27
	mtc1	t1, $f28
	mtc1	t1, $f29
	mtc1	t1, $f30
	mtc1	t1, $f31
#else
	.set	mips3
	dmtc1	t1, $f0
	dmtc1	t1, $f2
	dmtc1	t1, $f4
	dmtc1	t1, $f6
	dmtc1	t1, $f8
	dmtc1	t1, $f10
	dmtc1	t1, $f12
	dmtc1	t1, $f14
	dmtc1	t1, $f16
	dmtc1	t1, $f18
	dmtc1	t1, $f20
	dmtc1	t1, $f22
	dmtc1	t1, $f24
	dmtc1	t1, $f26
	dmtc1	t1, $f28
	dmtc1	t1, $f30
#endif
	jr	ra
	END(_init_fpu)
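
/*
 * Illustrative sketch only (not part of this file): resume() is reached from
 * the architecture's switch_to() on the scheduler's context-switch path,
 * which supplies the arguments documented above (a0 = prev, a1 = next,
 * a2 = next_ti, a3 = usedfpu).  The C below is a simplified, assumption-laden
 * outline of such a caller, not the kernel's actual switch_to() macro; the
 * real macro differs between kernel versions and also handles DSP state and
 * FPU affinity details omitted here.
 *
 *	#define switch_to(prev, next, last)				\
 *	do {								\
 *		u32 __usedfpu;						\
 *									\
 *		__usedfpu = test_and_clear_tsk_thread_flag(prev,	\
 *							TIF_USEDFPU);	\
 *		(last) = resume(prev, next, task_thread_info(next),	\
 *				__usedfpu);				\
 *	} while (0)
 */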