/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>
#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
/*
 * Called on interrupt entry from kernel context to handle an interrupted
 * power-saving idle loop: if TLF_NAPPING/TLF_SLEEPING is set in the
 * thread-info local flags (r2 = current), clear the flag and divert the
 * return path; otherwise fall straight back to the caller.
 */
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

	/* napping: clear the flag and resume the idle code directly */
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

	/* sleeping: clear the flag and return to the saved LR with EE off */
7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
/*
 * Kernel Userspace Execution Prevention (book3s/32): reload the user
 * segment registers from the value saved at THREAD+THSR0 (r2 = current).
 * __kuep_lock applies the saved value as-is; __kuep_unlock clears SR_NX
 * first, making the user segments executable again.
 */
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

/* Convenience wrappers; empty when KUEP is not configured. */
.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

/*
 * System call entry.  On entry (from head_32.h): r11 holds the
 * pre-interrupt r1 (stored as GPR1 and the stack back-chain below),
 * r9 the interrupted MSR, r0 the value passed on as "orig r0", and
 * r10 points into the task so that r10 - THREAD recovers current (r2).
 * Finish building the pt_regs frame on the kernel stack, then call
 * system_call_exception() in C and return through ret_from_syscall.
 */
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
	/* r3 = syscall return value from system_call_exception */
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0			/* non-zero: full register restore needed */
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

	/* Slow path: also restore CTR/XER, non-volatile and volatile GPRs */
3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
/* Invalidate the icache and clear the pending-flush flag, then resume. */
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

/*
 * First return of a kernel thread that is transitioning to user mode:
 * call the thread function (r14) with its argument (r15), then return
 * to user space with a zero syscall result.
 */
	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14			/* thread function */
	mr	r3,r15			/* thread argument */
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

/*
 * Fast return path used when no C-level exit work is needed: restore
 * the interrupted context directly from the exception frame at r11
 * (r9/r12 hold the saved MSR/NIP to load into SRR1/SRR0).
 */
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	/* NOTE(review): SPRN_NRI write for 8xx perf — confirm exact semantics */
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

/*
 * Common interrupt exit.  Dispatch on MSR_PR of the interrupted context:
 * user returns go through interrupt_exit_user_prepare() (a non-zero r3
 * result means the non-volatile GPRs must be reloaded first), kernel
 * returns through interrupt_exit_kernel_prepare().
 */
	.globl	interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0	/* non-zero: an interrupted stwu needs emulating */
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

/*
 * Common exception-level return: restore all GPRs/SPRs from the frame
 * and leave through the level-specific return-from-interrupt opcode.
 * User-mode returns are simply redirected to interrupt_return.
 */
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

/* Reload a saved save/restore register pair (e.g. SRR0/SRR1) from the frame */
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
/* Restore the e500 MMU assist (MAS) registers saved on exception entry */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */