/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * For an SMTC kernel, the global IE bit must be left set; interrupts
 * are controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

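/*
 * Save the temporary registers ($10-$15 and $24, plus $8/$9 on 32-bit,
 * where SAVE_SOME does not cover them) together with the multiply/divide
 * state: HI/LO, the ACX extension on SmartMIPS, or the full multiplier
 * state on Octeon.
 */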
		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before
		 * kernel code can corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm

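/*
 * Save the callee-saved ("static") registers $16-$23 and $30; the
 * RESTORE_STATIC counterpart below reloads them.
 */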
		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

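/*
 * get_saved_sp loads the current CPU's saved kernel stack pointer from
 * kernelsp[] into k1; set_saved_sp stores \stackp back. On SMP the CPU
 * index is read from the CP0 register selected by ASM_SMP_CPUID_REG.
 */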
#ifdef CONFIG_SMP
		.macro	get_saved_sp	/* SMP variation */
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) via the diagnostic register, to
		 * work around the out-of-order issue in the Loongson2F.
		 */
		move	k0, ra
		jal	1f
		nop
1:		jal	1f
		nop
1:		jal	1f
		nop
1:		jal	1f
		nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

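/*
 * Save the scratch registers and the CP0 Status, Cause and EPC
 * registers to the pt_regs frame, switching to the kernel stack first
 * if the exception was taken in user mode. On exit $28 points at the
 * current thread_info.
 */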
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	k0, CP0_TCSTATUS
		.set	mips0
		LONG_S	k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

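/*
 * Undo SAVE_TEMP: reload the multiply/divide state (via
 * octeon_mult_restore first on Octeon) and the temporary registers.
 */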
		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

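/*
 * RESTORE_SOME undoes SAVE_SOME, and RESTORE_SP_AND_RET additionally
 * reloads the saved stack pointer and returns from the exception. The
 * R3000/TX39 variant returns with "jr; rfe"; everything else uses eret.
 */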
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		rfe
		.set	pop
		.endm

#else
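/* The variant for all other CPUs, which return with eret. */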
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * We need to make sure the read-modify-write
		 * of Status below isn't perturbed by an interrupt
		 * or cross-TC access, so we need to do at least a DMT,
		 * protected by an interrupt-inhibit. But setting IXMT
		 * also creates a few-cycle window where an IPI could
		 * be queued and not be detected before potentially
		 * returning to a WAIT or user-mode loop. It must be
		 * replayed.
		 *
		 * We're in the middle of a context switch, and
		 * we can't dispatch it directly without trashing
		 * some registers, so we'll try to detect this unlikely
		 * case and program a software interrupt in the VPE,
		 * as would be done for a cross-VPE IPI. To accommodate
		 * the handling of that case, we're doing a DVPE instead
		 * of just a DMT here to protect against other threads.
		 * This is a lot of cruft to cover a tiny window.
		 * If you can find a better design, implement it!
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DVPE	5			# dvpe a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Only after EXL/ERL have been restored to Status can we
		 * restore TCStatus.IXMT.
		 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	a0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		bnez	v1, 0f

		/*
		 * We'd like to detect any IPIs queued in the tiny window
		 * above and request a software interrupt to service them
		 * when we ERET.
		 *
		 * Computing the offset into the IPIQ array of the executing
		 * TC's IPI queue in-line would be tedious. We use part of
		 * the TCContext register to hold 16 bits of offset that we
		 * can add in-line to find the queue head.
		 */
		mfc0	v0, CP0_TCCONTEXT
		la	a2, IPIQ
		srl	v0, v0, 16
		addu	a2, a2, v0
		LONG_L	v0, 0(a2)
		beqz	v0, 0f
		/*
		 * If we have a queue, provoke dispatch within the VPE by
		 * setting C_SW1.
		 */
		mfc0	v0, CP0_CAUSE
		ori	v0, v0, C_SW1
		mtc0	v0, CP0_CAUSE
0:
		/*
		 * This test should really never branch but
		 * let's be prudent here. Having atomized
		 * the shared register modifications, we can
		 * now EVPE, and must do so before interrupts
		 * are potentially re-enabled.
		 */
		andi	a1, a1, MVPCONTROL_EVP
		beqz	a1, 1f
		evpe
1:
		/* We know that TCStatus.IXMT should be set from above */
		xori	a0, a0, TCSTATUS_IXMT
		or	a0, a0, v1
		mtc0	a0, CP0_TCSTATUS
		_ehb

		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	arch=r4000
		eret
		.set	mips0
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

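/*
 * Full-frame teardown helpers. RESTORE_SP or RESTORE_SP_AND_RET must
 * come last: once sp has been reloaded from PT_R29, the frame can no
 * longer be addressed.
 */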
		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set the privilege level
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set the privilege level
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC. We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2			# dmt v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */