/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernels, the global IE bit should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_LO(sp)
#endif
		.endm
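
/*
 * With SmartMIPS, each mflhxu in SAVE_TEMP above pops one word out of the
 * extended accumulator, so the three reads yield LO, HI and ACX in turn.
 * On other CPUs the mfhi/mflo results are read several instructions before
 * they are stored, presumably to hide the multiplier access latency.
 */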

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#define CPU_ID_REG	CP0_TCBIND
#define CPU_ID_MFC0	mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT	48	/* XCONTEXT */
#define CPU_ID_REG	CP0_XCONTEXT
#define CPU_ID_MFC0	MFC0
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#define CPU_ID_REG	CP0_CONTEXT
#define CPU_ID_MFC0	MFC0
#endif
		.macro	get_saved_sp	/* SMP variation */
		CPU_ID_MFC0	k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, PTEBASE_SHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		CPU_ID_MFC0	\temp, CPU_ID_REG
		LONG_SRL	\temp, PTEBASE_SHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		.macro	get_saved_sp	/* Uniprocessor variation */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
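
/*
 * Rough C-style sketch of the SMP lookup above (illustrative only, not
 * actual kernel code): the shifted CPU_ID_REG value is used directly as a
 * byte offset into the kernelsp array, so get_saved_sp amounts to
 *
 *	k1 = *(unsigned long *)((char *)kernelsp
 *				+ (CPU_ID_REG >> PTEBASE_SHIFT));
 *
 * The per-CPU (or per-TC) offset is recovered from the upper bits of the
 * selected CP0 register, so the kernel stack can be located using only
 * k0/k1, before sp or gp can be trusted.
 */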

		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_STATUS(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$6, PT_R6(sp)
		LONG_S	$7, PT_R7(sp)
		LONG_S	v1, PT_CAUSE(sp)
		MFC0	v1, CP0_EPC
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		LONG_S	v1, PT_EPC(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
		pref	0, PT_R31(sp)	/* Prefetch the $31(ra) */
		/*
		 * The Octeon multiplier state is affected by general multiply
		 * instructions. It must be saved before any kernel code might
		 * corrupt it.
		 */
		jal	octeon_mult_save
		LONG_L	v1, 0($28)	/* Load the current pointer */
		/* Restore $31(ra) that was changed by the jal */
		LONG_L	ra, PT_R31(sp)
		pref	0, 0(v1)	/* Prefetch the current thread */
#endif
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm
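
/*
 * Illustrative sketch only (the handler name below is made up): a typical
 * exception entry built from these macros looks roughly like
 *
 *	NESTED(handle_something, PT_SIZE, sp)
 *		SAVE_ALL
 *		CLI
 *		...			# call the C handler
 *		j	ret_from_irq
 *	END(handle_something)
 *
 * Real users of SAVE_ALL and CLI/STI/KMODE live in arch/mips/kernel/genex.S
 * and the low-level entry code.
 */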

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		rfe
		.set	pop
		.endm
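
/*
 * Two notes on the R3000/TX39 path above: the Status merge in RESTORE_SOME
 * keeps the live IM bits (the 0xff00 interrupt-mask field) and takes every
 * other bit from the saved PT_STATUS, and the return relies on rfe
 * executing in the delay slot of "jr k0", so the pre-exception KU/IE state
 * is restored as control transfers back to the saved EPC.
 */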

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * We need to make sure the read-modify-write
		 * of Status below isn't perturbed by an interrupt
		 * or cross-TC access, so we need to do at least a DMT,
		 * protected by an interrupt-inhibit. But setting IXMT
		 * also creates a few-cycle window where an IPI could
		 * be queued and not be detected before potentially
		 * returning to a WAIT or user-mode loop. It must be
		 * replayed.
		 *
		 * We're in the middle of a context switch, and
		 * we can't dispatch it directly without trashing
		 * some registers, so we'll try to detect this unlikely
		 * case and program a software interrupt in the VPE,
		 * as would be done for a cross-VPE IPI. To accommodate
		 * the handling of that case, we're doing a DVPE instead
		 * of just a DMT here to protect against other threads.
		 * This is a lot of cruft to cover a tiny window.
		 * If you can find a better design, implement it!
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DVPE	5			# dvpe a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Only after EXL/ERL have been restored to Status can we
		 * restore TCStatus.IXMT.
		 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	a0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		bnez	v1, 0f

		/*
		 * We'd like to detect any IPIs queued in the tiny window
		 * above and request a software interrupt to service them
		 * when we ERET.
		 *
		 * Computing the offset into the IPIQ array of the executing
		 * TC's IPI queue in-line would be tedious. We use part of
		 * the TCContext register to hold 16 bits of offset that we
		 * can add in-line to find the queue head.
		 */
		mfc0	v0, CP0_TCCONTEXT
		la	a2, IPIQ
		srl	v0, v0, 16
		addu	a2, a2, v0
		LONG_L	v0, 0(a2)
		beqz	v0, 0f
		/*
		 * If we have a queue, provoke dispatch within the VPE by
		 * setting C_SW1
		 */
		mfc0	v0, CP0_CAUSE
		ori	v0, v0, C_SW1
		mtc0	v0, CP0_CAUSE
0:
		/*
		 * This test should really never branch but
		 * let's be prudent here. Having atomized
		 * the shared register modifications, we can
		 * now EVPE, and must do so before interrupts
		 * are potentially re-enabled.
		 */
		andi	a1, a1, MVPCONTROL_EVP
		beqz	a1, 1f
		evpe
1:
		/* We know that TCStatus.IXMT should be set from above */
		xori	a0, a0, TCSTATUS_IXMT
		or	a0, a0, v1
		mtc0	a0, CP0_TCSTATUS
		_ehb

		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
		eret
		.set	mips0
		.endm

#endif
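
/*
 * In the non-R3000 variant above, the return is done with eret (hence the
 * temporary .set mips3), which has no delay slot and, with ERL clear,
 * clears Status.EXL and resumes at the EPC value written back in
 * RESTORE_SOME.
 */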

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm
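
/*
 * Note that sp still points at the saved struct pt_regs until the very
 * end, so RESTORE_SP / RESTORE_SP_AND_RET must be the last step of the
 * composite macros above.
 */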

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL and ERL */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm
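
/*
 * The or/xori pair in the non-SMTC branch of CLI relies on the identity
 * (x | mask) ^ mask == x & ~mask: Status is read, CU0 is set, and all of
 * the STATMASK bits (interrupt enable, exception/error level, kernel/user
 * mode) come out cleared, i.e. kernel mode with interrupts disabled.
 */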

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm
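
/*
 * In its non-SMTC branch, STI uses the same set-then-clear idiom as CLI,
 * but bit 0 is masked out of the final xori (STATMASK & ~1), so the IE bit
 * set by the preceding or stays set and the macro returns with interrupts
 * enabled.
 */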

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC. We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2			# dmt	v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */