/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_LO(sp)
#endif
		.endm

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

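/*
 * Informal overview (added description, not part of the original
 * comments): the macros below look up the per-CPU kernel stack pointer.
 * On SMP kernels the CPU is identified through a CP0 register (TCBind,
 * XContext or Context, depending on configuration) and that value is
 * scaled into an offset into the kernelsp array; uniprocessor kernels
 * simply load the single kernelsp variable.  Roughly:
 *
 *	CPU_ID_MFC0	k0, CPU_ID_REG		# CPU id in the upper bits
 *	LONG_SRL	k0, PTEBASE_SHIFT	# scale to an array offset
 *	LONG_ADDU	k1, k0			# kernelsp + offset
 *	LONG_L		k1, %lo(kernelsp)(k1)	# this CPU's kernel sp
 */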
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#define CPU_ID_REG	CP0_TCBIND
#define CPU_ID_MFC0	mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT	48	/* XCONTEXT */
#define CPU_ID_REG	CP0_XCONTEXT
#define CPU_ID_MFC0	MFC0
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#define CPU_ID_REG	CP0_CONTEXT
#define CPU_ID_MFC0	MFC0
#endif
		.macro	get_saved_sp	/* SMP variation */
		CPU_ID_MFC0	k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, PTEBASE_SHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

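/*
 * Note on the 64-bit case above (added description): when kernelsp
 * cannot be reached through a single %hi/%lo pair, its address is
 * materialised 16 bits at a time:
 *
 *	lui	k1, %highest(kernelsp)		# bits 63..48
 *	daddiu	k1, %higher(kernelsp)		# bits 47..32
 *	dsll	k1, 16
 *	daddiu	k1, %hi(kernelsp)		# bits 31..16
 *	dsll	k1, 16
 *	LONG_L	k1, %lo(kernelsp)(k1)		# bits 15..0 in the load
 */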
		.macro	set_saved_sp stackp temp temp2
		CPU_ID_MFC0	\temp, CPU_ID_REG
		LONG_SRL	\temp, PTEBASE_SHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear BTB (branch target buffer), forbid RAS (return address
		 * stack) to work around the out-of-order issue in Loongson2F
		 * via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		nop
1:		jal	1f
		nop
1:		jal	1f
		nop
1:		jal	1f
		nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

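/*
 * Informal summary (added description, not from the original): SAVE_SOME
 * shifts CP0 Status left by 3 so that the CU0 bit lands in the sign bit.
 * CU0 set means we were already running on the kernel stack; otherwise
 * the exception came from user mode and the per-CPU kernel stack is
 * fetched via get_saved_sp.  Either way a pt_regs frame of PT_SIZE bytes
 * is carved out, the old sp is stored at PT_R29, the "some" registers
 * plus Status, Cause and EPC are saved, and $28 is pointed at the
 * current thread_info by masking sp with _THREAD_MASK.
 */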
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_STATUS(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$6, PT_R6(sp)
		LONG_S	$7, PT_R7(sp)
		LONG_S	v1, PT_CAUSE(sp)
		MFC0	v1, CP0_EPC
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		LONG_S	v1, PT_EPC(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
		pref	0, PT_R31(sp)	/* Prefetch the $31(ra) */
		/* The Octeon multiplier state is affected by general multiply
		   instructions.  It must be saved before any kernel code
		   might corrupt it */
		jal	octeon_mult_save
		LONG_L	v1, 0($28)	/* Load the current pointer */
		/* Restore $31(ra) that was changed by the jal */
		LONG_L	ra, PT_R31(sp)
		pref	0, 0(v1)	/* Prefetch the current thread */
#endif
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm
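/*
 * Informal note on RESTORE_TEMP above (added description): with
 * SmartMIPS each mtlhx pushes the previously written value one step
 * along the LO -> HI -> ACX chain, so reloading PT_ACX, then PT_HI,
 * then PT_LO leaves all three registers with their saved contents, the
 * mirror image of the mflhxu sequence in SAVE_TEMP.  Without SmartMIPS,
 * plain mtlo/mthi restore the multiply/divide registers directly.
 */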

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		rfe
		.set	pop
		.endm

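/*
 * Informal note (added description): on R3000-class CPUs the return
 * above is "jr k0" with rfe in the branch delay slot; rfe pops the
 * KUp/IEp bits of c0_status back into KUc/IEc, playing the role that
 * eret plays for the later cores handled below.
 */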
#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * We need to make sure the read-modify-write
		 * of Status below isn't perturbed by an interrupt
		 * or cross-TC access, so we need to do at least a DMT,
		 * protected by an interrupt-inhibit.  But setting IXMT
		 * also creates a few-cycle window where an IPI could
		 * be queued and not be detected before potentially
		 * returning to a WAIT or user-mode loop.  It must be
		 * replayed.
		 *
		 * We're in the middle of a context switch, and
		 * we can't dispatch it directly without trashing
		 * some registers, so we'll try to detect this unlikely
		 * case and program a software interrupt in the VPE,
		 * as would be done for a cross-VPE IPI.  To accommodate
		 * the handling of that case, we're doing a DVPE instead
		 * of just a DMT here to protect against other threads.
		 * This is a lot of cruft to cover a tiny window.
		 * If you can find a better design, implement it!
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DVPE	5			# dvpe a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
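		/*
		 * Informal note on the Status restore below (added
		 * description): the ori/xori pair first clears the
		 * STATMASK bits (KSU/ERL/EXL/IE on most configurations)
		 * in the live c0_status, putting the CPU into a known
		 * kernel-mode, interrupts-off state; the saved PT_STATUS
		 * is then merged back in, except that the interrupt mask
		 * field (0xff00) is kept from the live register rather
		 * than taken from the saved image.
		 */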
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Only after EXL/ERL have been restored to status can we
		 * restore TCStatus.IXMT.
		 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	a0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		bnez	v1, 0f

		/*
		 * We'd like to detect any IPIs queued in the tiny window
		 * above and request a software interrupt to service them
		 * when we ERET.
		 *
		 * Computing the offset into the IPIQ array of the executing
		 * TC's IPI queue in-line would be tedious.  We use part of
		 * the TCContext register to hold 16 bits of offset that we
		 * can add in-line to find the queue head.
		 */
		mfc0	v0, CP0_TCCONTEXT
		la	a2, IPIQ
		srl	v0, v0, 16
		addu	a2, a2, v0
		LONG_L	v0, 0(a2)
		beqz	v0, 0f
		/*
		 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
		 */
		mfc0	v0, CP0_CAUSE
		ori	v0, v0, C_SW1
		mtc0	v0, CP0_CAUSE
0:
		/*
		 * This test should really never branch but
		 * let's be prudent here.  Having atomized
		 * the shared register modifications, we can
		 * now EVPE, and must do so before interrupts
		 * are potentially re-enabled.
		 */
		andi	a1, a1, MVPCONTROL_EVP
		beqz	a1, 1f
		evpe
1:
		/* We know that TCStatus.IXMT should be set from above */
		xori	a0, a0, TCSTATUS_IXMT
		or	a0, a0, v1
		mtc0	a0, CP0_TCSTATUS
		_ehb

		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
		eret
		.set	mips0
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

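/*
 * Usage sketch (illustrative only; the handler and C function names
 * below are made up, real users of these macros live under
 * arch/mips/kernel/):
 *
 *	NESTED(handle_something, PT_SIZE, sp)
 *		SAVE_ALL
 *		CLI			# kernel mode, interrupts off
 *		move	a0, sp		# struct pt_regs * argument
 *		jal	do_something
 *		RESTORE_ALL_AND_RET
 *	END(handle_something)
 */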
/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

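/*
 * Informal note (added description): in the non-SMTC variants, CLI and
 * STI differ only in the xori mask.  CLI xors with the full STATMASK,
 * so IE is cleared along with KSU/ERL/EXL; STI uses STATMASK & ~1,
 * leaving the IE bit that was set by the preceding ori in place so
 * interrupts come back on at the mtc0.
 */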
/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC.  We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2			# dmt	v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */