/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

		/* Save the assembler temporary; .set noat lets us touch $1. */
		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		/*
		 * Save the remaining scratch registers and, where present,
		 * the HI/LO (plus SmartMIPS ACX) multiplier state.
		 */
		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before kernel
		 * code runs, since that code might corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm

		/* Save the callee-saved registers $16..$23 and $30. */
		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
		/* Load the current CPU's saved kernel stack pointer into k1. */
		.macro	get_saved_sp	/* SMP variation */
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		/* Load the saved kernel stack pointer into k1. */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
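
/*
 * SAVE_SOME saves the scratch state that every exception handler
 * needs: the caller-saved argument/result registers, sp, gp, ra
 * and the CP0 Status, Cause and EPC registers. If the CU0 bit in
 * Status is set we were already running on the kernel stack and
 * keep the current sp; otherwise the trap came from user mode and
 * we switch to this CPU's kernel stack via get_saved_sp. On exit,
 * $28 points at the current thread_info.
 */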

		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped to kernel mode,
		 * even containing sensitive MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		/* Point $28 at the current thread_info by masking sp. */
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm
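
/*
 * The RESTORE_* macros below are the mirror images of the SAVE_*
 * macros above; each reloads the same set of registers from the
 * pt_regs frame that sp points at.
 */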

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	arch=r4000
		eret
		.set	mips0
		.endm

#endif
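
/*
 * Every load above uses sp as the base register, so the saved
 * stack pointer itself must be reloaded last; RESTORE_SP and
 * RESTORE_SP_AND_RET therefore come at the very end of the
 * RESTORE_ALL sequences below.
 */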

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 CU0 enable bit as a sign that we're running on the
 * kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 CU0 enable bit as a sign that we're running on the
 * kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * that for the R3000 this means copying the previous interrupt enable
 * from IEp. Set the cp0 CU0 enable bit as a sign that we're running
 * on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */