/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * STATMASK covers the CP0 Status bits manipulated on exception entry
 * and exit: the KUo/IEo/KUp/IEp/KUc/IEc stack on R3000-class CPUs, or
 * KSU/ERL/EXL/IE on everything else.
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

	.macro	SAVE_AT
	.set	push
	.set	noat
	LONG_S	$1, PT_R1(sp)
	.set	pop
	.endm

	.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	$10, PT_R10(sp)
	LONG_S	$11, PT_R11(sp)
	LONG_S	$12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	LONG_S	$13, PT_R13(sp)
	LONG_S	$14, PT_R14(sp)
	LONG_S	$15, PT_R15(sp)
	LONG_S	$24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is affected by general
	 * multiply instructions. It must be saved here, before
	 * kernel code has a chance to corrupt it.
	 */
	jal	octeon_mult_save
#endif
	.endm

	.macro	SAVE_STATIC
	LONG_S	$16, PT_R16(sp)
	LONG_S	$17, PT_R17(sp)
	LONG_S	$18, PT_R18(sp)
	LONG_S	$19, PT_R19(sp)
	LONG_S	$20, PT_R20(sp)
	LONG_S	$21, PT_R21(sp)
	LONG_S	$22, PT_R22(sp)
	LONG_S	$23, PT_R23(sp)
	LONG_S	$30, PT_R30(sp)
	.endm

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it.  If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp.  If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
#ifdef CONFIG_SMP

/* SMP variation */
	.macro	get_saved_sp docfi=0 tosp=0
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	.if \tosp
	move	k0, sp
	.if \docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
	LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else /* !CONFIG_SMP */

/* Uniprocessor variation */
	.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear the BTB (branch target buffer) and forbid the RAS
	 * (return address stack) via the diagnostic register to work
	 * around the out-of-order issue in Loongson2F.
	 */
	move	k0, ra
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	.if \tosp
	move	k0, sp
	.if \docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif
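
/*
 * For illustration, the two get_saved_sp modes in practice.  With
 * tosp=1 the caller ends up on this CPU's kernel stack with the old
 * sp preserved in k0, which is how SAVE_SOME below uses it:
 *
 *	get_saved_sp	tosp=1		# k0 = old sp, sp = kernelsp[cpu]
 *	PTR_SUBU	sp, PT_SIZE	# make room for a struct pt_regs
 *	LONG_S		k0, PT_R29(sp)	# record the pre-exception sp
 *
 * Without tosp the new value is only loaded into k1 (sp is untouched,
 * k0 is clobbered as scratch) and the caller switches stacks itself.
 * set_saved_sp is the write side: it records a CPU's kernel stack in
 * the kernelsp array (or the single kernelsp variable on UP).
 */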

	.macro	SAVE_SOME
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f
	 move	k0, sp		/* delay slot: k0 = old sp */
#ifdef CONFIG_EVA
	/*
	 * Flush interAptiv's Return Prediction Stack (RPS) by writing
	 * EntryHi. Toggling Config7.RPS is slower and less portable.
	 *
	 * The RPS isn't automatically flushed when exceptions are
	 * taken, which can result in kernel mode speculative accesses
	 * to user addresses if the RPS mispredicts. That's harmless
	 * when user and kernel share the same address space, but with
	 * EVA the same user segments may be unmapped to kernel mode,
	 * even containing sensitive MMIO regions or invalid memory.
	 *
	 * This can happen when the kernel sets the return address to
	 * ret_from_* and jr's to the exception handler, which looks
	 * more like a tail call than a function call. If nested calls
	 * don't evict the last user address in the RPS, it will
	 * mispredict the return and fetch from a user controlled
	 * address into the icache.
	 *
	 * More recent EVA-capable cores with MAAR to restrict
	 * speculative accesses aren't affected.
	 */
	MFC0	k0, CP0_ENTRYHI
	MTC0	k0, CP0_ENTRYHI
#endif
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	at=k1
#endif
	PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#endif
	LONG_S	k0, PT_R29(sp)
	LONG_S	$3, PT_R3(sp)
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly.
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	LONG_S	$2, PT_R2(sp)
	LONG_S	v1, PT_STATUS(sp)
	LONG_S	$4, PT_R4(sp)
	mfc0	v1, CP0_CAUSE
	LONG_S	$5, PT_R5(sp)
	LONG_S	v1, PT_CAUSE(sp)
	LONG_S	$6, PT_R6(sp)
	LONG_S	ra, PT_R31(sp)
	MFC0	ra, CP0_EPC
	LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	ra, PT_EPC(sp)
	LONG_S	$25, PT_R25(sp)
	LONG_S	$28, PT_R28(sp)

	/* Set thread_info if we're coming from user mode */
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	bltz	k0, 9f

	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set	mips64
	pref	0, 0($28)	/* Prefetch the current pointer */
#endif
9:
	.set	pop
	.endm

	.macro	SAVE_ALL
	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	.endm
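
/*
 * For illustration, roughly how the save macros above pair with the
 * CLI/STI/KMODE and RESTORE_* macros below in a low-level exception
 * handler (the real handlers are generated in arch/mips/kernel/genex.S
 * and entry.S; handle_example/do_example are made-up names):
 *
 *	NESTED(handle_example, PT_SIZE, sp)
 *	SAVE_ALL			# build a struct pt_regs on the stack
 *	CLI				# kernel mode, interrupts off
 *	.set	at
 *	move	a0, sp			# hand pt_regs to the C handler
 *	PTR_LA	ra, ret_from_exception
 *	j	do_example
 *	END(handle_example)
 *
 * The exit path (ret_from_exception and friends) then unwinds with the
 * RESTORE_* macros, finishing with RESTORE_SP_AND_RET.
 */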

	.macro	RESTORE_AT
	.set	push
	.set	noat
	LONG_L	$1, PT_R1(sp)
	.set	pop
	.endm

	.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$24, PT_ACX(sp)
	mtlhx	$24
	LONG_L	$24, PT_HI(sp)
	mtlhx	$24
	LONG_L	$24, PT_LO(sp)
	mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$10, PT_R10(sp)
	LONG_L	$11, PT_R11(sp)
	LONG_L	$12, PT_R12(sp)
	LONG_L	$13, PT_R13(sp)
	LONG_L	$14, PT_R14(sp)
	LONG_L	$15, PT_R15(sp)
	LONG_L	$24, PT_R24(sp)
	.endm

	.macro	RESTORE_STATIC
	LONG_L	$16, PT_R16(sp)
	LONG_L	$17, PT_R17(sp)
	LONG_L	$18, PT_R18(sp)
	LONG_L	$19, PT_R19(sp)
	LONG_L	$20, PT_R20(sp)
	LONG_L	$21, PT_R21(sp)
	LONG_L	$22, PT_R22(sp)
	LONG_L	$23, PT_R23(sp)
	LONG_L	$30, PT_R30(sp)
	.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_IM
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	LONG_L	sp, PT_R29(sp)
	jr	k0
	 rfe
	.set	pop
	.endm

#else
	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_FR | ST0_IM
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET
	LONG_L	sp, PT_R29(sp)
#ifdef CONFIG_CPU_MIPSR6
	eretnc
#else
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.endm

#endif

	.macro	RESTORE_SP
	LONG_L	sp, PT_R29(sp)
	.endm

	.macro	RESTORE_ALL
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP
	.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the CP0 CU0 bit as a sign that we're running on the kernel stack.
 */
	.macro	CLI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the CP0 CU0 bit as a sign that we're running on the kernel stack.
 */
	.macro	STI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_enable_hazard
	.endm
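
/*
 * Worked example of the Status arithmetic above, taking the non-R3000
 * case where STATMASK is 0x1f (IE, EXL, ERL and the two KSU bits):
 *
 *	li	t1, ST0_CU0 | 0x1f	# CU0 plus the five STATMASK bits
 *	or	t0, t1			# set them all in Status
 *	xori	t0, 0x1f		# then clear IE, EXL, ERL and KSU
 *
 * which leaves CU0 set and IE/EXL/ERL/KSU clear: kernel mode with
 * interrupts disabled.  STI differs only in xoring with
 * (STATMASK & ~1) = 0x1e, so bit 0 (IE) stays set and interrupts end
 * up enabled.  KMODE below likewise leaves bit 0 alone, preserving the
 * current interrupt state.
 */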

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous interrupt enable
 * from IEp.  Set the CP0 CU0 bit as a sign that we're running on the
 * kernel stack.
 */
	.macro	KMODE
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */