/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
	.macro	cfi_rel_offset reg offset=0 docfi=0
	.if	\docfi
	.cfi_rel_offset	\reg, \offset
	.endif
	.endm

	.macro	cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset	\reg, \offset, \docfi
	.endm

	.macro	cfi_restore reg offset=0 docfi=0
	.if	\docfi
	.cfi_restore	\reg
	.endif
	.endm

	.macro	cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore	\reg \offset \docfi
	.endm

#if defined(CONFIG_CPU_R3000)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
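/*
 * STATMASK covers the CP0 Status bits that must be updated atomically
 * on exception entry/exit: the three-deep KUo/IEo/KUp/IEp/KUc/IEc
 * stack (0x3f) on the R3000, or KSU/ERL/EXL/IE (0x1f) on R4000-class
 * and newer CPUs.
 *
 * The SAVE_* macros below spill registers into the struct pt_regs
 * frame on the kernel stack, using the PT_* offsets generated by
 * asm-offsets. The cfi_st/cfi_ld helpers above additionally emit CFI
 * annotations when the docfi argument is non-zero, so unwinders can
 * track where each register was saved.
 */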
	.macro	SAVE_AT docfi=0
	.set	push
	.set	noat
	cfi_st	$1, PT_R1, \docfi
	.set	pop
	.endm

	.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	cfi_st	$10, PT_R10, \docfi
	cfi_st	$11, PT_R11, \docfi
	cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	cfi_st	$13, PT_R13, \docfi
	cfi_st	$14, PT_R14, \docfi
	cfi_st	$15, PT_R15, \docfi
	cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is affected by general
	 * multiply instructions. It must be saved before
	 * kernel code might corrupt it.
	 */
	jal	octeon_mult_save
#endif
	.endm
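/*
 * SAVE_STATIC saves the callee-saved registers s0-s7 ($16-$23) and
 * fp ($30). Paths that return straight to the interrupted context can
 * skip these; they are only needed when the complete register set
 * must be in the pt_regs frame, e.g. for context switch or ptrace.
 */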
	.macro	SAVE_STATIC docfi=0
	cfi_st	$16, PT_R16, \docfi
	cfi_st	$17, PT_R17, \docfi
	cfi_st	$18, PT_R18, \docfi
	cfi_st	$19, PT_R19, \docfi
	cfi_st	$20, PT_R20, \docfi
	cfi_st	$21, PT_R21, \docfi
	cfi_st	$22, PT_R22, \docfi
	cfi_st	$23, PT_R23, \docfi
	cfi_st	$30, PT_R30, \docfi
	.endm

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it. If tosp is set, it stores the current sp in
 * k0 and loads the new value into sp. If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
#ifdef CONFIG_SMP

	/* SMP variation */
	.macro	get_saved_sp docfi=0 tosp=0
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	.if \tosp
	move	k0, sp
	.if \docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
	LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else /* !CONFIG_SMP */
	/* Uniprocessor variation */
	.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear the BTB (branch target buffer) and forbid the RAS
	 * (return address stack), via the diagnostic register, to
	 * work around the out-of-order issue in Loongson2F.
	 */
	move	k0, ra
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	.if \tosp
	move	k0, sp
	.if \docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif
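/*
 * SAVE_SOME is the first-level save used on exception entry. It tests
 * the CU0 bit of CP0_Status (set by CLI/STI/KMODE below as a sign
 * that we are already on the kernel stack): if the exception came
 * from user mode, the stack is switched to the kernel stack via
 * get_saved_sp; otherwise the current sp is kept and only the pt_regs
 * frame is pushed.
 */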
	.macro	SAVE_SOME docfi=0
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f
	 move	k0, sp
	.if \docfi
	.cfi_register sp, k0
	.endif
#ifdef CONFIG_EVA
	/*
	 * Flush interAptiv's Return Prediction Stack (RPS) by writing
	 * EntryHi. Toggling Config7.RPS is slower and less portable.
	 *
	 * The RPS isn't automatically flushed when exceptions are
	 * taken, which can result in kernel mode speculative accesses
	 * to user addresses if the RPS mispredicts. That's harmless
	 * when user and kernel share the same address space, but with
	 * EVA the same user segments may be unmapped to kernel mode,
	 * even containing sensitive MMIO regions or invalid memory.
	 *
	 * This can happen when the kernel sets the return address to
	 * ret_from_* and jr's to the exception handler, which looks
	 * more like a tail call than a function call. If nested calls
	 * don't evict the last user address in the RPS, it will
	 * mispredict the return and fetch from a user controlled
	 * address into the icache.
	 *
	 * More recent EVA-capable cores with MAAR to restrict
	 * speculative accesses aren't affected.
	 */
	MFC0	k0, CP0_ENTRYHI
	MTC0	k0, CP0_ENTRYHI
#endif
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	at=k1
#endif
	PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#endif
	.if \docfi
	.cfi_def_cfa sp, 0
	.endif
	cfi_st	k0, PT_R29, \docfi
	cfi_rel_offset	sp, PT_R29, \docfi
	cfi_st	v1, PT_R3, \docfi
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly.
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	cfi_st	v0, PT_R2, \docfi
	LONG_S	v1, PT_STATUS(sp)
	cfi_st	$4, PT_R4, \docfi
	mfc0	v1, CP0_CAUSE
	cfi_st	$5, PT_R5, \docfi
	LONG_S	v1, PT_CAUSE(sp)
	cfi_st	$6, PT_R6, \docfi
	cfi_st	ra, PT_R31, \docfi
	MFC0	ra, CP0_EPC
	cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	LONG_S	ra, PT_EPC(sp)
	.if \docfi
	.cfi_rel_offset ra, PT_EPC
	.endif
	cfi_st	$25, PT_R25, \docfi
	cfi_st	$28, PT_R28, \docfi

	/* Set thread_info if we're coming from user mode */
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	bltz	k0, 9f

	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set	mips64
	pref	0, 0($28)	/* Prefetch the current pointer */
#endif
9:
	.set	pop
	.endm

	.macro	SAVE_ALL docfi=0
	SAVE_SOME \docfi
	SAVE_AT \docfi
	SAVE_TEMP \docfi
	SAVE_STATIC \docfi
	.endm
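/*
 * The RESTORE_* macros below mirror their SAVE_* counterparts,
 * reloading registers from the pt_regs frame. RESTORE_ALL applies
 * them in roughly the reverse order of SAVE_ALL and reloads sp last,
 * since the frame itself is addressed through sp.
 */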
	.macro	RESTORE_AT docfi=0
	.set	push
	.set	noat
	cfi_ld	$1, PT_R1, \docfi
	.set	pop
	.endm

	.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$24, PT_ACX(sp)
	mtlhx	$24
	LONG_L	$24, PT_HI(sp)
	mtlhx	$24
	LONG_L	$24, PT_LO(sp)
	mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$10, PT_R10, \docfi
	cfi_ld	$11, PT_R11, \docfi
	cfi_ld	$12, PT_R12, \docfi
	cfi_ld	$13, PT_R13, \docfi
	cfi_ld	$14, PT_R14, \docfi
	cfi_ld	$15, PT_R15, \docfi
	cfi_ld	$24, PT_R24, \docfi
	.endm

	.macro	RESTORE_STATIC docfi=0
	cfi_ld	$16, PT_R16, \docfi
	cfi_ld	$17, PT_R17, \docfi
	cfi_ld	$18, PT_R18, \docfi
	cfi_ld	$19, PT_R19, \docfi
	cfi_ld	$20, PT_R20, \docfi
	cfi_ld	$21, PT_R21, \docfi
	cfi_ld	$22, PT_R22, \docfi
	cfi_ld	$23, PT_R23, \docfi
	cfi_ld	$30, PT_R30, \docfi
	.endm

	.macro	RESTORE_SP docfi=0
	cfi_ld	sp, PT_R29, \docfi
	.endm
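/*
 * Returning from an exception differs by CPU family: the R3000 jumps
 * to the saved EPC and executes rfe (restore from exception) in the
 * delay slot to pop the KU/IE status stack, while R4000-class and
 * newer cores use eret, which returns to CP0_EPC and clears EXL in a
 * single instruction. Hence the two RESTORE_SOME/RESTORE_SP_AND_RET
 * variants below.
 */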
#if defined(CONFIG_CPU_R3000)

	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_IM
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET docfi=0
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	RESTORE_SP \docfi
	jr	k0
	 rfe
	.set	pop
	.endm

#else
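/*
 * In the R4000-class variant below, Status is updated in two steps:
 * the live register is first masked down to kernel mode with
 * interrupts disabled (clearing the STATMASK bits), and only then is
 * the saved PT_STATUS merged with the live CU1/FR/IM bits, so there
 * is no window where stale mode or interrupt bits are in effect.
 */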
	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_FR | ST0_IM
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
#ifdef CONFIG_64BIT
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET docfi=0
	RESTORE_SP \docfi
#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
	eretnc
#else
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.endm

#endif

	.macro	RESTORE_ALL docfi=0
	RESTORE_TEMP \docfi
	RESTORE_STATIC \docfi
	RESTORE_AT \docfi
	RESTORE_SOME \docfi
	RESTORE_SP \docfi
	.endm
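/*
 * A minimal usage sketch (hypothetical handler, not part of this
 * file): an exception entry built from these macros typically looks
 * like
 *
 *	SAVE_ALL			# spill state into the pt_regs frame
 *	CLI				# kernel mode, interrupts off (below)
 *	move	a0, sp			# pass pt_regs to the C handler
 *	jal	do_foo			# hypothetical C-level handler
 *	j	ret_from_exception	# eventually unwinds via RESTORE_ALL
 */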
/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	CLI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | STATMASK
	or	t0, t1
	xori	t0, STATMASK
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	STI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | STATMASK
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_enable_hazard
	.endm

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	KMODE
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000)
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */