#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72
#define EX_CFAR		80
#define EX_PPR		88	/* SMT thread status register (priority) */

#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label);					\
	mtlr	r12;							\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	li	r10,MSR_RI;						\
	mtmsrd	r10,1;			/* Set RI (EE=0) */		\
	blr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	li	r10,MSR_RI;						\
	mtmsrd	r10,1;			/* Set RI (EE=0) */		\
	b	label;
#endif
#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
	__EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)

/*
 * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
 * so no need to rfid.  Save lr in case we're CONFIG_RELOCATABLE, in which
 * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
 */
#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec)	\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
 * low halfword of the address, but for Kdump we need the whole low
 * word.
 */
#define LOAD_HANDLER(reg, label)					\
	/* Handlers must be within 64K of kbase, which must be 64K aligned */ \
	ori	reg,reg,(label)-_stext;	/* virt addr of handler ... */
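/*
 * Illustrative expansion (for reading convenience only, not generated
 * code): assuming a hypothetical handler label "foo_common",
 * LOAD_HANDLER(r12, foo_common) assembles to roughly
 *
 *	ori	r12,r12,(foo_common)-_stext
 *
 * ORing the handler's offset from _stext into the kernel base address
 * previously loaded from PACAKBASE.  This is why handlers must sit
 * within 64K of a 64K-aligned kernel base: the offset has to fit in
 * the 16-bit ori immediate without disturbing the upper bits.
 */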
/* Exception register prefixes */
#define EXC_HV	H
#define EXC_STD

#if defined(CONFIG_RELOCATABLE)
/*
 * If we support interrupts with relocation on AND we're a relocatable
 * kernel, we need to use LR to get to the 2nd level handler.  So, save/restore
 * it when required.
 */
#define SAVE_LR(reg, area)	mflr	reg ;	std	reg,area+EX_LR(r13)
#define GET_LR(reg, area)	ld	reg,area+EX_LR(r13)
#define RESTORE_LR(reg, area)	ld	reg,area+EX_LR(r13) ; mtlr reg
#else
/* ...else LR is unused and in register. */
#define SAVE_LR(reg, area)
#define GET_LR(reg, area)	mflr	reg
#define RESTORE_LR(reg, area)
#endif

/*
 * PPR save/restore macros used in exceptions-64s.S
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra, rb)						\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,PACACURRENT(r13);					\
	ld	rb,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	rb,TASKTHREADPPR(ra);					\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)

#define RESTORE_PPR_PACA(area, ra)					\
BEGIN_FTR_SECTION_NESTED(941)						\
	ld	ra,area+EX_PPR(r13);					\
	mtspr	SPRN_PPR,ra;						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)

/*
 * Increase the priority on systems where PPR save/restore is not
 * implemented/supported.
 */
#define HMT_MEDIUM_PPR_DISCARD						\
BEGIN_FTR_SECTION_NESTED(942)						\
	HMT_MEDIUM;							\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942)	/* non P7 */

/*
 * Get an SPR into a register if the CPU has the given feature
 */
#define OPT_GET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mfspr	ra,spr;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Save a register to the PACA if the CPU has the given feature
 */
#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)						\
	std	ra,offset(r13);						\
END_FTR_SECTION_NESTED(ftr,ftr,943)

#define EXCEPTION_PROLOG_0(area)					\
	GET_PACA(r13);							\
	std	r9,area+EX_R9(r13);	/* save r9 */			\
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
	HMT_MEDIUM;							\
	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)

#define __EXCEPTION_PROLOG_1(area, extra, vec)				\
	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);	\
	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
	SAVE_LR(r10, area);						\
	mfcr	r9;							\
	extra(vec);							\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	GET_SCRATCH0(r10);						\
	std	r10,area+EX_R13(r13)
#define EXCEPTION_PROLOG_1(area, extra, vec)				\
	__EXCEPTION_PROLOG_1(area, extra, vec)

#define __EXCEPTION_PROLOG_PSERIES_1(label, h)				\
	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	mtspr	SPRN_##h##SRR0,r12;					\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	mtspr	SPRN_##h##SRR1,r10;					\
	h##rfid;							\
	b	.			/* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1(label, h)				\
	__EXCEPTION_PROLOG_PSERIES_1(label, h)
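/*
 * Illustrative note (assumed expansion, shown only to clarify the
 * SPRN_##h## token pasting above): with a hypothetical label
 * "foo_common", EXCEPTION_PROLOG_PSERIES_1(foo_common, EXC_HV) reads
 * and writes the hypervisor pair and returns via hrfid, while the
 * EXC_STD variant uses the standard pair and plain rfid:
 *
 *	mfspr	r11,SPRN_HSRR0		vs.	mfspr	r11,SPRN_SRR0
 *	...					...
 *	hrfid					rfid
 *
 * In both cases the final rfid acts as a branch to "label" with the
 * kernel MSR from PACAKMSR installed, because (H)SRR0/(H)SRR1 were
 * just loaded with the handler address and that MSR value.
 */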
#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec)		\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_PSERIES_1(label, h);

#define __KVMTEST(n)							\
	lbz	r10,HSTATE_IN_GUEST(r13);				\
	cmpwi	r10,0;							\
	bne	do_kvm_##n

#define __KVM_HANDLER(area, h, n)					\
do_kvm_##n:								\
	BEGIN_FTR_SECTION_NESTED(947)					\
	ld	r10,area+EX_CFAR(r13);					\
	std	r10,HSTATE_CFAR(r13);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
	ld	r10,area+EX_R10(r13);					\
	stw	r9,HSTATE_SCRATCH1(r13);				\
	ld	r9,area+EX_R9(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	li	r12,n;							\
	b	kvmppc_interrupt

#define __KVM_HANDLER_SKIP(area, h, n)					\
do_kvm_##n:								\
	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
	ld	r10,area+EX_R10(r13);					\
	beq	89f;							\
	stw	r9,HSTATE_SCRATCH1(r13);				\
	ld	r9,area+EX_R9(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	li	r12,n;							\
	b	kvmppc_interrupt;					\
89:	mtocrf	0x80,r9;						\
	ld	r9,area+EX_R9(r13);					\
	b	kvmppc_skip_##h##interrupt

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#define KVMTEST(n)			__KVMTEST(n)
#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST(n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif

#ifdef CONFIG_KVM_BOOK3S_PR
#define KVMTEST_PR(n)			__KVMTEST(n)
#define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST_PR(n)
#define KVM_HANDLER_PR(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)
#endif

#define NOTEST(n)
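/*
 * Illustrative expansion (assumed, using vector 0x300 purely as an
 * example): in a KVM-enabled config, KVMTEST(0x300) becomes
 *
 *	lbz	r10,HSTATE_IN_GUEST(r13)
 *	cmpwi	r10,0
 *	bne	do_kvm_0x300
 *
 * i.e. if the PACA shadow state says we interrupted guest context,
 * branch to the do_kvm_0x300 stub that KVM_HANDLER() emits, which
 * stashes r9/r12 in HSTATE scratch, loads the vector number into r12
 * and jumps to kvmppc_interrupt.  NOTEST() is the no-op variant for
 * vectors that never need the check.
 */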
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	blt+	cr1,3f;			/* abort if it is */		\
	li	r1,(n);			/* will be reloaded later */	\
	sth	r1,PACA_TRAP_SAVE(r13);					\
	std	r3,area+EX_R3(r13);					\
	addi	r3,r13,area;		/* r3 -> where regs are saved */ \
	RESTORE_LR(r1, area);						\
	b	bad_stack;						\
3:	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	beq	4f;			/* if from kernel mode */	\
	ACCOUNT_CPU_USER_ENTRY(r9, r10);				\
	SAVE_PPR(area, r9, r10);					\
4:	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	BEGIN_FTR_SECTION_NESTED(66);					\
	ld	r10,area+EX_CFAR(r13);					\
	std	r10,ORIG_GPR3(r1);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
	GET_LR(r9,area);		/* Get LR, later save to stack */ \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	lbz	r10,PACASOFTIRQEN(r13);					\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe */	\
	std	r10,SOFTE(r1);						\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	\
	ACCOUNT_STOLEN_TIME

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(loc, vec, label)				\
	. = loc;							\
	.globl label##_pSeries;						\
label##_pSeries:							\
	HMT_MEDIUM_PPR_DISCARD;						\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,		\
				 EXC_STD, KVMTEST_PR, vec)

/* Version of above for when we have to branch out-of-line */
#define STD_EXCEPTION_PSERIES_OOL(vec, label)				\
	.globl label##_pSeries;						\
label##_pSeries:							\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);		\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)

#define STD_EXCEPTION_HV(loc, vec, label)				\
	. = loc;							\
	.globl label##_hv;						\
label##_hv:								\
	HMT_MEDIUM_PPR_DISCARD;						\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,		\
				 EXC_HV, KVMTEST, vec)

/* Version of above for when we have to branch out-of-line */
#define STD_EXCEPTION_HV_OOL(vec, label)				\
	.globl label##_hv;						\
label##_hv:								\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);			\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV)
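/*
 * Illustrative use (an assumed example mirroring how exceptions-64s.S
 * consumes these macros, not a definition made here): a standard
 * vector such as the 0x700 program check could be wired up with
 *
 *	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
 *
 * which emits program_check_pSeries at location 0x700 and routes it
 * through the pSeries prolog to a program_check_common handler.
 */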
#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label)			\
	. = loc;							\
	.globl label##_relon_pSeries;					\
label##_relon_pSeries:							\
	HMT_MEDIUM_PPR_DISCARD;						\
	/* No guest interrupts come through here */			\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				       EXC_STD, KVMTEST_PR, vec)

#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label)			\
	.globl label##_relon_pSeries;					\
label##_relon_pSeries:							\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);		\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)

#define STD_RELON_EXCEPTION_HV(loc, vec, label)				\
	. = loc;							\
	.globl label##_relon_hv;					\
label##_relon_hv:							\
	HMT_MEDIUM_PPR_DISCARD;						\
	/* No guest interrupts come through here */			\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				       EXC_HV, KVMTEST, vec)

#define STD_RELON_EXCEPTION_HV_OOL(vec, label)				\
	.globl label##_relon_hv;					\
label##_relon_hv:							\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);			\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)

/* These associate vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
#define SOFTEN_VALUE_0x502	PACA_IRQ_EE
#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
#define SOFTEN_VALUE_0x982	PACA_IRQ_DEC
#define SOFTEN_VALUE_0xa00	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe82	PACA_IRQ_DBELL

#define __SOFTEN_TEST(h, vec)						\
	lbz	r10,PACASOFTIRQEN(r13);					\
	cmpwi	r10,0;							\
	li	r10,SOFTEN_VALUE_##vec;					\
	beq	masked_##h##interrupt
#define _SOFTEN_TEST(h, vec)	__SOFTEN_TEST(h, vec)

#define SOFTEN_TEST_PR(vec)						\
	KVMTEST_PR(vec);						\
	_SOFTEN_TEST(EXC_STD, vec)

#define SOFTEN_TEST_HV(vec)						\
	KVMTEST(vec);							\
	_SOFTEN_TEST(EXC_HV, vec)

#define SOFTEN_TEST_HV_201(vec)						\
	KVMTEST(vec);							\
	_SOFTEN_TEST(EXC_STD, vec)

#define SOFTEN_NOTEST_PR(vec)		_SOFTEN_TEST(EXC_STD, vec)
#define SOFTEN_NOTEST_HV(vec)		_SOFTEN_TEST(EXC_HV, vec)
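/*
 * Illustrative expansion (assumed, decrementer vector as an example):
 * _SOFTEN_TEST(EXC_STD, 0x900) becomes
 *
 *	lbz	r10,PACASOFTIRQEN(r13)
 *	cmpwi	r10,0
 *	li	r10,SOFTEN_VALUE_0x900		(i.e. PACA_IRQ_DEC)
 *	beq	masked_interrupt
 *
 * EXC_STD expands to nothing, so masked_##h##interrupt pastes to
 * masked_interrupt (the EXC_HV variant gives masked_Hinterrupt).
 * When interrupts are soft-disabled, the reason bit is left in r10
 * for the masked handler to record in paca->irq_happened so the
 * interrupt can be replayed later instead of being taken now.
 */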
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)		\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);			\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, h);

#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)		\
	__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)

#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label)			\
	. = loc;							\
	.globl label##_pSeries;						\
label##_pSeries:							\
	HMT_MEDIUM_PPR_DISCARD;						\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,				\
				    EXC_STD, SOFTEN_TEST_PR)

#define MASKABLE_EXCEPTION_HV(loc, vec, label)				\
	. = loc;							\
	.globl label##_hv;						\
label##_hv:								\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,				\
				    EXC_HV, SOFTEN_TEST_HV)

#define MASKABLE_EXCEPTION_HV_OOL(vec, label)				\
	.globl label##_hv;						\
label##_hv:								\
	EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec);		\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);

#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)	\
	HMT_MEDIUM_PPR_DISCARD;						\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);			\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)		\
	__MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)

#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label)		\
	. = loc;							\
	.globl label##_relon_pSeries;					\
label##_relon_pSeries:							\
	_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label,			\
					  EXC_STD, SOFTEN_NOTEST_PR)

#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label)			\
	. = loc;							\
	.globl label##_relon_hv;					\
label##_relon_hv:							\
	_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label,			\
					  EXC_HV, SOFTEN_NOTEST_HV)

#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label)			\
	.globl label##_relon_hv;					\
label##_relon_hv:							\
	EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec);		\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);

/*
 * Our exception common code can be passed various "additions"
 * to specify the behaviour of interrupts, whether to kick the
 * runlatch, etc...
 */

/* Exception addition: Hard disable interrupts */
#define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)

#define ADD_NVGPRS				\
	bl	.save_nvgprs

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	CURRENT_THREAD_INFO(r3, r1);		\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions)	\
	.align	7;						\
	.globl label##_common;					\
label##_common:							\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);		\
	additions;						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	bl	hdlr;						\
	b	ret

#define STD_EXCEPTION_COMMON(trap, label, hdlr)			\
	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except,	\
			 ADD_NVGPRS;DISABLE_INTS)

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling
 * (finish nap and runlatch)
 */
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		\
	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
			 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	CURRENT_THREAD_INFO(r11, r1);		\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

#endif	/* _ASM_POWERPC_EXCEPTION_H */