/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * Status register bits that exception entry/exit must manage: on R3000
 * class CPUs the KUo/IEo/KUp/IEp/KUc/IEc stack (0x3f), on everything
 * else KSU, ERL, EXL and IE (0x1f).
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved on entry,
		 * before kernel code has a chance to corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
		.macro	get_saved_sp	/* SMP variation */
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) via the diagnostic register to
		 * work around the out-of-order issue in Loongson2F.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
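/*
 * Illustrative usage (a sketch, not part of this header): the context
 * switch path publishes the incoming task's kernel stack with
 * set_saved_sp so that the next exception taken from user mode can
 * fetch it via get_saved_sp. The register choices and the exact
 * offset below are assumptions for the example:
 *
 *	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
 *	set_saved_sp	t0, t1, t2
 *
 * get_saved_sp deliberately clobbers only k0/k1, the two registers
 * reserved for exception handlers, since nothing else has been saved
 * yet when it runs.
 */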
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped to kernel mode,
		 * even containing sensitive MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
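		/*
		 * Note on the mode test above: ST0_CU0 is bit 28 of
		 * c0_status and is only set while running on a kernel
		 * stack (see CLI/STI/KMODE below). "sll k0, 3" moves it
		 * into the sign bit, so "bltz k0, 8f" skips the kernelsp
		 * lookup whenever the exception came from kernel mode.
		 */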
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		/* Let the assembler use k0 as $at for PTR_SUBU's expansion. */
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)

		/* Set thread_info if we're coming from user mode */
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		bltz	k0, 9f

		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
9:
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm
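/*
 * A typical low-level handler built on these macros looks roughly
 * like the sketch below (illustrative only: "handle_example" and
 * "do_example" are made-up names; real handlers are generated by the
 * BUILD_HANDLER macro in arch/mips/kernel/genex.S):
 *
 *	NESTED(handle_example, PT_SIZE, sp)
 *		SAVE_ALL
 *		CLI
 *		move	a0, sp
 *		PTR_LA	ra, ret_from_exception
 *		j	do_example
 *		END(handle_example)
 */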
		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_IM
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_FR | ST0_IM
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
#ifdef CONFIG_CPU_MIPSR6
		eretnc
#else
		.set	arch=r4000
		eret
		.set	mips0
#endif
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 CU0 enable bit as a sign that we're running on the
 * kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1		/* set CU0 and all STATMASK bits */
		xori	t0, STATMASK	/* then clear the STATMASK bits */
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 CU0 enable bit as a sign that we're running on the
 * kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1	/* clear all STATMASK bits but IE */
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 CU0 enable bit as a sign that we're running on the
 * kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2		/* copy IEp down to IEc */
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */