/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

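/*
 * STATMASK covers the Status bits rewritten by the CLI/STI/KMODE and
 * RESTORE_SOME macros below: IE, EXL, ERL and KSU (0x1f) on R4000-style
 * CPUs, the KUo/IEo/KUp/IEp/KUc/IEc stack (0x3f) on R3000-class CPUs,
 * and everything except IE (0x1e) on SMTC, where interrupt masking is
 * done per-TC through TCStatus.IXMT as noted above.
 */
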
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions.  It must be saved here, and
		 * restored by RESTORE_TEMP, since kernel code might
		 * corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#define CPU_ID_REG CP0_TCBIND
#define CPU_ID_MFC0 mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT	48	/* XCONTEXT */
#define CPU_ID_REG CP0_XCONTEXT
#define CPU_ID_MFC0 MFC0
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#define CPU_ID_REG CP0_CONTEXT
#define CPU_ID_MFC0 MFC0
#endif
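/*
 * get_saved_sp/set_saved_sp maintain the current CPU's kernel stack
 * pointer in the kernelsp array.  Both macros derive the array offset
 * the same way: the CPU-unique value kept in the upper bits of
 * CPU_ID_REG is shifted right by PTEBASE_SHIFT to select this CPU's
 * slot.  get_saved_sp may only clobber k0/k1, as it runs on exception
 * entry before any other registers have been saved.
 */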
		.macro	get_saved_sp	/* SMP variation */
		CPU_ID_MFC0	k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, PTEBASE_SHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		CPU_ID_MFC0	\temp, CPU_ID_REG
		LONG_SRL	\temp, PTEBASE_SHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

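/*
 * SAVE_SOME is the first step of exception entry: it checks Status.CU0
 * to see whether we were already running on the kernel stack and, if
 * not, switches to it via get_saved_sp, then allocates a struct pt_regs
 * frame and saves the argument/result registers, $25, gp, ra, the old
 * sp and $0 together with Status, Cause and EPC.  Finally gp ($28) is
 * pointed at the current thread_info.
 */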
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	k0, CP0_TCSTATUS
		.set	mips0
		LONG_S	k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

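/*
 * Both RESTORE_SOME variants below rebuild Status in two steps: the
 * STATMASK bits are first cleared in the live register, then the saved
 * PT_STATUS image is merged back in, except for the interrupt mask
 * field (IM, 0xff00), which is kept from the live register so that
 * interrupt mask changes made while in the kernel are not undone.
 */
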
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
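/*
 * R4000-style variant: Status is rebuilt the same way as above, EPC is
 * reloaded from the frame here, and the actual return is the eret in
 * RESTORE_SP_AND_RET further down.
 */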
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * We need to make sure the read-modify-write
		 * of Status below isn't perturbed by an interrupt
		 * or cross-TC access, so we need to do at least a DMT,
		 * protected by an interrupt-inhibit. But setting IXMT
		 * also creates a few-cycle window where an IPI could
		 * be queued and not be detected before potentially
		 * returning to a WAIT or user-mode loop. It must be
		 * replayed.
		 *
		 * We're in the middle of a context switch, and
		 * we can't dispatch it directly without trashing
		 * some registers, so we'll try to detect this unlikely
		 * case and program a software interrupt in the VPE,
		 * as would be done for a cross-VPE IPI.  To accommodate
		 * the handling of that case, we're doing a DVPE instead
		 * of just a DMT here to protect against other threads.
		 * This is a lot of cruft to cover a tiny window.
		 * If you can find a better design, implement it!
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DVPE	5			# dvpe a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Only after EXL/ERL have been restored to status can we
		 * restore TCStatus.IXMT.
		 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	a0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		bnez	v1, 0f

		/*
		 * We'd like to detect any IPIs queued in the tiny window
		 * above and request a software interrupt to service them
		 * when we ERET.
		 *
		 * Computing the offset into the IPIQ array of the executing
		 * TC's IPI queue in-line would be tedious.  We use part of
		 * the TCContext register to hold 16 bits of offset that we
		 * can add in-line to find the queue head.
		 */
		mfc0	v0, CP0_TCCONTEXT
		la	a2, IPIQ
		srl	v0, v0, 16
		addu	a2, a2, v0
		LONG_L	v0, 0(a2)
		beqz	v0, 0f
		/*
		 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
		 */
		mfc0	v0, CP0_CAUSE
		ori	v0, v0, C_SW1
		mtc0	v0, CP0_CAUSE
0:
		/*
		 * This test should really never branch but
		 * let's be prudent here.  Having atomized
		 * the shared register modifications, we can
		 * now EVPE, and must do so before interrupts
		 * are potentially re-enabled.
		 */
		andi	a1, a1, MVPCONTROL_EVP
		beqz	a1, 1f
		evpe
1:
		/* We know that TCStatus.IXMT should be set from above */
		xori	a0, a0, TCSTATUS_IXMT
		or	a0, a0, v1
		mtc0	a0, CP0_TCSTATUS
		_ehb

		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
		eret
		.set	mips0
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

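/*
 * CLI, STI and KMODE below all manipulate Status with the same or/xori
 * idiom: the "or" sets CU0 plus a group of STATMASK bits, and the
 * following "xori" clears the unwanted ones again.  CLI clears the
 * whole STATMASK (kernel mode, interrupts off); STI uses STATMASK & ~1
 * so IE stays set (interrupts on); KMODE leaves IE out of both steps,
 * preserving the previous interrupt state.
 */
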
/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC.  We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2			# dmt	v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */