#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72
#define EX_CFAR		80

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
 * low halfword of the address, but for Kdump we need the whole low
 * word.
 */
#define LOAD_HANDLER(reg, label)					\
	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */

/* Exception register prefixes */
#define EXC_HV	H
#define EXC_STD

#define __EXCEPTION_PROLOG_1(area, extra, vec)				\
	GET_PACA(r13);							\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	BEGIN_FTR_SECTION_NESTED(66);					\
	mfspr	r10,SPRN_CFAR;						\
	std	r10,area+EX_CFAR(r13);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);		\
	mfcr	r9;							\
	extra(vec);							\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	GET_SCRATCH0(r10);						\
	std	r10,area+EX_R13(r13)
#define EXCEPTION_PROLOG_1(area, extra, vec)				\
	__EXCEPTION_PROLOG_1(area, extra, vec)

#define __EXCEPTION_PROLOG_PSERIES_1(label, h)				\
	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	mtspr	SPRN_##h##SRR0,r12;					\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	mtspr	SPRN_##h##SRR1,r10;					\
	h##rfid;							\
	b	.	/* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1(label, h)				\
	__EXCEPTION_PROLOG_PSERIES_1(label, h)

#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec)		\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_PSERIES_1(label, h);
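/*
 * For illustration, a hypothetical vector stub built from the two prolog
 * halves would do roughly the following (vector 0x700 and the NOTEST
 * placeholder are example values only):
 *
 *	SET_SCRATCH0(r13)	; stash r13 so the prolog can use it
 *	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x700)
 *	EXCEPTION_PROLOG_PSERIES_1(program_check_common, EXC_STD)
 *
 * which is essentially what STD_EXCEPTION_PSERIES() below expands to,
 * with the appropriate KVM guest test in place of NOTEST.
 */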
#define __KVMTEST(n)							\
	lbz	r10,HSTATE_IN_GUEST(r13);				\
	cmpwi	r10,0;							\
	bne	do_kvm_##n

#define __KVM_HANDLER(area, h, n)					\
do_kvm_##n:								\
	ld	r10,area+EX_R10(r13);					\
	stw	r9,HSTATE_SCRATCH1(r13);				\
	ld	r9,area+EX_R9(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	li	r12,n;							\
	b	kvmppc_interrupt

#define __KVM_HANDLER_SKIP(area, h, n)					\
do_kvm_##n:								\
	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
	ld	r10,area+EX_R10(r13);					\
	beq	89f;							\
	stw	r9,HSTATE_SCRATCH1(r13);				\
	ld	r9,area+EX_R9(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	li	r12,n;							\
	b	kvmppc_interrupt;					\
89:	mtocrf	0x80,r9;						\
	ld	r9,area+EX_R9(r13);					\
	b	kvmppc_skip_##h##interrupt

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#define KVMTEST(n)			__KVMTEST(n)
#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST(n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif

#ifdef CONFIG_KVM_BOOK3S_PR
#define KVMTEST_PR(n)			__KVMTEST(n)
#define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST_PR(n)
#define KVM_HANDLER_PR(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)
#endif

#define NOTEST(n)

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	blt+	cr1,3f;			/* abort if it is		*/ \
	li	r1,(n);			/* will be reloaded later	*/ \
	sth	r1,PACA_TRAP_SAVE(r13);					\
	std	r3,area+EX_R3(r13);					\
	addi	r3,r13,area;		/* r3 -> where regs are saved*/	\
	b	bad_stack;						\
3:	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	ACCOUNT_CPU_USER_ENTRY(r9, r10);				\
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	BEGIN_FTR_SECTION_NESTED(66);					\
	ld	r10,area+EX_CFAR(r13);					\
	std	r10,ORIG_GPR3(r1);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						\
	lbz	r10,PACASOFTIRQEN(r13);					\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r10,SOFTE(r1);						\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/ \
	ACCOUNT_STOLEN_TIME
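/*
 * After EXCEPTION_PROLOG_COMMON runs, r1 points at a freshly allocated
 * exception frame on the kernel stack, with the saved register state
 * (struct pt_regs) at r1 + STACK_FRAME_OVERHEAD.  The STD_EXCEPTION_COMMON*
 * macros below pass that frame to a C handler which, by convention, takes
 * it as its only argument, e.g. (illustrative prototype only):
 *
 *	void program_check_exception(struct pt_regs *regs);
 */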
/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(loc, vec, label)			\
	. = loc;						\
	.globl label##_pSeries;					\
label##_pSeries:						\
	HMT_MEDIUM;						\
	SET_SCRATCH0(r13);		/* save r13 */		\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				 EXC_STD, KVMTEST_PR, vec)

#define STD_EXCEPTION_HV(loc, vec, label)			\
	. = loc;						\
	.globl label##_hv;					\
label##_hv:							\
	HMT_MEDIUM;						\
	SET_SCRATCH0(r13);		/* save r13 */		\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				 EXC_HV, KVMTEST, vec)

#define __SOFTEN_TEST(h)					\
	lbz	r10,PACASOFTIRQEN(r13);				\
	cmpwi	r10,0;						\
	beq	masked_##h##interrupt
#define _SOFTEN_TEST(h)	__SOFTEN_TEST(h)

#define SOFTEN_TEST_PR(vec)					\
	KVMTEST_PR(vec);					\
	_SOFTEN_TEST(EXC_STD)

#define SOFTEN_TEST_HV(vec)					\
	KVMTEST(vec);						\
	_SOFTEN_TEST(EXC_HV)

#define SOFTEN_TEST_HV_201(vec)					\
	KVMTEST(vec);						\
	_SOFTEN_TEST(EXC_STD)

#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
	HMT_MEDIUM;						\
	SET_SCRATCH0(r13);		/* save r13 */		\
	__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);		\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
	__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)

#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label)		\
	. = loc;						\
	.globl label##_pSeries;					\
label##_pSeries:						\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,			\
				    EXC_STD, SOFTEN_TEST_PR)

#define MASKABLE_EXCEPTION_HV(loc, vec, label)			\
	. = loc;						\
	.globl label##_hv;					\
label##_hv:							\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,			\
				    EXC_HV, SOFTEN_TEST_HV)
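/*
 * A vector declared with one of the macros above is paired with a common
 * handler declared via STD_EXCEPTION_COMMON() below.  As a hypothetical
 * sketch of how a user such as exceptions-64s.S might pair the two
 * (vector 0x700 and the handler name are example values):
 *
 *	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
 *	...
 *	STD_EXCEPTION_COMMON(0x700, program_check, .program_check_exception)
 */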
#define DISABLE_INTS				\
	li	r11,0;				\
	stb	r11,PACASOFTIRQEN(r13);		\
	stb	r11,PACAHARDIRQEN(r13);		\
	TRACE_DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling.
 */
#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	FINISH_NAP;					\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	FINISH_NAP;					\
	DISABLE_INTS;					\
BEGIN_FTR_SECTION					\
	bl	.ppc64_runlatch_on;			\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	clrrdi	r11,r1,THREAD_SHIFT;		\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

#endif	/* _ASM_POWERPC_EXCEPTION_H */