#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers. They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */
#include <asm/head-64.h>
#include <asm/feature-fixups.h>

/*
 * PACA save area offsets (exgen, exmc, etc), in bytes from the start of
 * the area.  r9-r13 each take a u64 slot; DSISR and CCR are 32-bit values
 * packed into a single u64 slot (offsets 48 and 52).
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#if defined(CONFIG_RELOCATABLE)
/* Relocatable kernels additionally save CTR (clobbered by 2nd-level branch) */
#define EX_CTR		72
#define EX_SIZE		10	/* size in u64 units */
#else
#define EX_SIZE		9	/* size in u64 units */
#endif

/*
 * maximum recursive depth of MCE exceptions
 */
#define MAX_MCE_DEPTH	4

/*
 * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
 * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
 * with EX_DAR.
 */
#define EX_R3		EX_DAR

#ifdef __ASSEMBLY__

/*
 * Three nop slots inside an STF-barrier fixup section; may be patched at
 * runtime with a store-forwarding barrier sequence on affected CPUs.
 */
#define STF_ENTRY_BARRIER_SLOT						\
	STF_ENTRY_BARRIER_FIXUP_SECTION;				\
	nop;								\
	nop;								\
	nop

/* Six-instruction patchable slot for the exit-side STF barrier. */
#define STF_EXIT_BARRIER_SLOT						\
	STF_EXIT_BARRIER_FIXUP_SECTION;					\
	nop;								\
	nop;								\
	nop;								\
	nop;								\
	nop;								\
	nop

/*
 * r10 must be free to use, r13 must be paca
 */
#define INTERRUPT_TO_KERNEL						\
	STF_ENTRY_BARRIER_SLOT

/*
 * Macros for annotating the expected destination of (h)rfid
 *
 * The nop instructions allow us to insert one or more instructions to flush the
 * L1-D cache when returning to userspace or a guest.
 */
#define RFI_FLUSH_SLOT							\
	RFI_FLUSH_FIXUP_SECTION;					\
	nop;								\
	nop;								\
	nop

/* Return to kernel context: no L1-D flush needed, plain rfid. */
#define RFI_TO_KERNEL							\
	rfid

/*
 * Returns to a less-privileged context take the STF exit barrier and the
 * RFI-flush slot.  The trailing branch to the *_flush_fallback handler is
 * only reached when the fixup section replaces the rfid in the slot.
 */
#define RFI_TO_USER							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define RFI_TO_USER_OR_KERNEL						\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define RFI_TO_GUEST							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define HRFI_TO_KERNEL							\
	hrfid

#define HRFI_TO_USER							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_USER_OR_KERNEL						\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_GUEST							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_UNKNOWN							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h

#ifdef CONFIG_RELOCATABLE
/*
 * 2nd-level prolog for interrupts taken with relocation already on.
 * Saves (H)SRR0 in r11 and (H)SRR1 in r12, then sets MSR_RI and branches
 * to \label via CTR (relocatable kernels can't use a direct branch).
 * Clobbers r10-r12; assumes r13 = paca.  \hsrr selects HSRR vs SRR.
 */
.macro EXCEPTION_PROLOG_2_RELON label, hsrr
	.if \hsrr
	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
	.else
	mfspr	r11,SPRN_SRR0	/* save SRR0 */
	.endif
	LOAD_HANDLER(r12, \label\())
	mtctr	r12
	.if \hsrr
	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
	.else
	mfspr	r12,SPRN_SRR1	/* and HSRR1 */
	.endif
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Set RI (EE=0) */
	bctr
.endm
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
.macro EXCEPTION_PROLOG_2_RELON label, hsrr
	.if \hsrr
	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0	/* save SRR0 */
	mfspr	r12,SPRN_SRR1	/* and SRR1 */
	.endif
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Set RI (EE=0) */
	b	\label
.endm
#endif

/*
 * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
 * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
 * EXCEPTION_PROLOG_2_RELON will be using LR.
 */
#define EXCEPTION_RELON_PROLOG(area, label, hsrr, extra, vec)		\
	SET_SCRATCH0(r13);	/* save r13 */				\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2_RELON label, hsrr

/* Exception register prefixes: selects HSRR0/1 vs SRR0/1 in the prologs */
#define EXC_HV		1
#define EXC_STD		0

#if defined(CONFIG_RELOCATABLE)
/*
 * If we support interrupts with relocation on AND we're a relocatable kernel,
 * we need to use CTR to get to the 2nd level handler. So, save/restore it
 * when required.
 */
#define SAVE_CTR(reg, area)	mfctr	reg ; 	std	reg,area+EX_CTR(r13)
#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
#else
/* ...else CTR is unused and in register. */
#define SAVE_CTR(reg, area)
#define GET_CTR(reg, area)	mfctr	reg
#define RESTORE_CTR(reg, area)
#endif

/*
 * PPR save/restore macros used in exceptions_64s.S
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra)						\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	ra,_PPR(r1);						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)

#define RESTORE_PPR_PACA(area, ra)					\
BEGIN_FTR_SECTION_NESTED(941)						\
	ld	ra,area+EX_PPR(r13);					\
	mtspr	SPRN_PPR,ra;						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)

/*
 * Get an SPR into a register if the CPU has the given feature
 */
#define OPT_GET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mfspr	ra,spr;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Set an SPR from a register if the CPU has the given feature
 */
#define OPT_SET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mtspr	spr,ra;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Save a register to the PACA if the CPU has the given feature
 */
#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)						\
	std	ra,offset(r13);						\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * First stage of the exception prolog: load the paca pointer into r13
 * and stash r9/r10 (with PPR/CFAR captured into them first, if the CPU
 * has those registers) in the given PACA save area.
 */
#define EXCEPTION_PROLOG_0(area)					\
	GET_PACA(r13);							\
	std	r9,area+EX_R9(r13);	/* save r9 */			\
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
	HMT_MEDIUM;							\
	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)

/* Store the PPR/CFAR grabbed above, run the STF barrier, save CTR, get CR */
#define __EXCEPTION_PROLOG_1_PRE(area)					\
	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
	INTERRUPT_TO_KERNEL;						\
	SAVE_CTR(r10, area);						\
	mfcr	r9

/* Finish saving volatile GPRs (r11-r13) into the PACA save area */
#define __EXCEPTION_PROLOG_1_POST(area)					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	GET_SCRATCH0(r10);						\
	std	r10,area+EX_R13(r13)

/*
 * This version of the EXCEPTION_PROLOG_1 will carry
 * an additional parameter called "bitmask" to support
 * checking of the interrupt maskable level in the SOFTEN_TEST.
 * Intended to be used in MASKABLE_EXCEPTION_* macros.
 */
#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask)		\
	__EXCEPTION_PROLOG_1_PRE(area);					\
	extra(vec, bitmask);						\
	__EXCEPTION_PROLOG_1_POST(area)

/*
 * This version of the EXCEPTION_PROLOG_1 is intended
 * to be used in STD_EXCEPTION* macros
 */
#define _EXCEPTION_PROLOG_1(area, extra, vec)				\
	__EXCEPTION_PROLOG_1_PRE(area);					\
	extra(vec);							\
	__EXCEPTION_PROLOG_1_POST(area)

#define EXCEPTION_PROLOG_1(area, extra, vec)				\
	_EXCEPTION_PROLOG_1(area, extra, vec)

/*
 * Second stage of the prolog: transfer to \label with the MMU on.
 * Saves (H)SRR0 in r11 and (H)SRR1 in r12, points (H)SRR0/1 at the
 * handler and the kernel MSR, then (h)rfid's there - the (h)rfid both
 * turns relocation on and keeps the transfer pipeline-safe.
 * Clobbers r10-r12; assumes r13 = paca.
 */
.macro EXCEPTION_PROLOG_2 label, hsrr
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	.if \hsrr
	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
	.else
	mfspr	r11,SPRN_SRR0	/* save SRR0 */
	.endif
	LOAD_HANDLER(r12,\label\())
	.if \hsrr
	mtspr	SPRN_HSRR0,r12
	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
	mtspr	SPRN_HSRR1,r10
	HRFI_TO_KERNEL
	.else
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1	/* and SRR1 */
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.endm

/*
 * _NORI variant keeps MSR_RI clear: used when the save areas the handler
 * depends on could still be overwritten by a recursive exception, so a
 * machine check here must not be treated as recoverable.
 */
.macro EXCEPTION_PROLOG_2_NORI label, hsrr
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	xori	r10,r10,MSR_RI		/* Clear MSR_RI */
	.if \hsrr
	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
	.else
	mfspr	r11,SPRN_SRR0	/* save SRR0 */
	.endif
	LOAD_HANDLER(r12,\label\())
	.if \hsrr
	mtspr	SPRN_HSRR0,r12
	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
	mtspr	SPRN_HSRR1,r10
	HRFI_TO_KERNEL
	.else
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1	/* and SRR1 */
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.endm

/* Full first-level exception prolog: stages 0, 1 and 2 in sequence. */
#define EXCEPTION_PROLOG(area, label, h, extra, vec)			\
	SET_SCRATCH0(r13);	/* save r13 */				\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2 label, h

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * If hv is possible, interrupts come in to the hv version
 * of the kvmppc_interrupt code, which then jumps to the PR handler,
 * kvmppc_interrupt_pr, if the guest is a PR guest.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

#ifdef CONFIG_RELOCATABLE
/* Relocatable kernel: must branch indirectly through CTR */
#define BRANCH_TO_COMMON(reg, label)					\
	__LOAD_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

#define BRANCH_LINK_TO_FAR(label)					\
	__LOAD_FAR_HANDLER(r12, label);					\
	mtctr	r12;							\
	bctrl

/*
 * KVM requires __LOAD_FAR_HANDLER.
 *
 * __BRANCH_TO_KVM_EXIT branches are also a special case because they
 * explicitly use r9 then reload it from PACA before branching. Hence
 * the double-underscore.
 */
#define __BRANCH_TO_KVM_EXIT(area, label)				\
	mfctr	r9;							\
	std	r9,HSTATE_SCRATCH1(r13);				\
	__LOAD_FAR_HANDLER(r9, label);					\
	mtctr	r9;							\
	ld	r9,area+EX_R9(r13);					\
	bctr

#else
/* Non-relocatable kernel: direct branches reach everything */
#define BRANCH_TO_COMMON(reg, label)					\
	b	label

#define BRANCH_LINK_TO_FAR(label)					\
	bl	label

#define __BRANCH_TO_KVM_EXIT(area, label)				\
	ld	r9,area+EX_R9(r13);					\
	b	label

#endif

/* Do not enable RI */
#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec)		\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2_NORI label, h

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
 * Branch to the do_kvm_(H)\n handler when the interrupt hit while a
 * guest was running (paca->kvm_hstate.in_guest != 0).
 * Clobbers r10 and cr0.
 */
.macro KVMTEST hsrr, n
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	.if \hsrr
	bne	do_kvm_H\n
	.else
	bne	do_kvm_\n
	.endif
.endm

/*
 * Guest exit path: stash CFAR/PPR into the KVM hstate, then hand off to
 * kvmppc_interrupt with r12 = (CR << 32) | vector number (r9 holds the
 * CR value saved by EXCEPTION_PROLOG_1).  \hsrr is unused here.
 */
.macro KVM_HANDLER area, hsrr, n
	BEGIN_FTR_SECTION_NESTED(947)
	ld	r10,\area+EX_CFAR(r13)
	std	r10,HSTATE_CFAR(r13)
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
	BEGIN_FTR_SECTION_NESTED(948)
	ld	r10,\area+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
	ld	r10,\area+EX_R10(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	ori	r12,r12,(\n)
	/* This reloads r9 before branching to kvmppc_interrupt */
	__BRANCH_TO_KVM_EXIT(\area, kvmppc_interrupt)
.endm

/*
 * As KVM_HANDLER, but first check for the KVM_GUEST_MODE_SKIP marker
 * (r10 holds hstate.in_guest from KVMTEST): if set, restore CR/r9/r10
 * and branch to the skip handler instead of exiting the guest.
 */
.macro KVM_HANDLER_SKIP area, hsrr, n
	cmpwi	r10,KVM_GUEST_MODE_SKIP
	beq	89f
	BEGIN_FTR_SECTION_NESTED(948)
	ld	r10,\area+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
	ld	r10,\area+EX_R10(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	ori	r12,r12,(\n)
	/* This reloads r9 before branching to kvmppc_interrupt */
	__BRANCH_TO_KVM_EXIT(\area, kvmppc_interrupt)
89:	mtocrf	0x80,r9
	ld	r9,\area+EX_R9(r13)
	ld	r10,\area+EX_R10(r13)
	.if \hsrr
	b	kvmppc_skip_Hinterrupt
	.else
	b	kvmppc_skip_interrupt
	.endif
.endm

#else
/* !CONFIG_KVM_BOOK3S_64_HANDLER: KVM tests/handlers compile to nothing */
.macro KVMTEST hsrr, n
.endm
.macro KVM_HANDLER area, hsrr, n
.endm
.macro KVM_HANDLER_SKIP area, hsrr, n
.endm
#endif

/* "extra" argument for prologs that need no KVM/soft-mask test */
#define NOTEST(n)

#define EXCEPTION_PROLOG_COMMON_1()					   \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \


/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address. We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * Builds a pt_regs frame on the kernel stack; if the resulting r1 is
 * not a sane kernel-stack address, diverts to bad_stack with the trap
 * number in paca->trap_save.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				   \
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
	blt+	cr1,3f;			/* abort if it is		*/ \
	li	r1,(n);			/* will be reloaded later	*/ \
	sth	r1,PACA_TRAP_SAVE(r13);					   \
	std	r3,area+EX_R3(r13);					   \
	addi	r3,r13,area;		/* r3 -> where regs are saved*/	   \
	RESTORE_CTR(r1, area);						   \
	b	bad_stack;						   \
3:	EXCEPTION_PROLOG_COMMON_1();					   \
	kuap_save_amr_and_lock r9, r10, cr1, cr0;			   \
	beq	4f;			/* if from kernel mode		*/ \
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
	SAVE_PPR(area, r9);						   \
4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
	EXCEPTION_PROLOG_COMMON_3(n)					   \
	ACCOUNT_STOLEN_TIME

/* Save original regs values from save area to stack frame. */
#define EXCEPTION_PROLOG_COMMON_2(area)					   \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					   \
	std	r9,GPR9(r1);						   \
	std	r10,GPR10(r1);						   \
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					   \
	ld	r11,area+EX_R13(r13);					   \
	std	r9,GPR11(r1);						   \
	std	r10,GPR12(r1);						   \
	std	r11,GPR13(r1);						   \
	BEGIN_FTR_SECTION_NESTED(66);					   \
	ld	r10,area+EX_CFAR(r13);					   \
	std	r10,ORIG_GPR3(r1);					   \
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);		   \
	GET_CTR(r10, area);						   \
	std	r10,_CTR(r1);

/*
 * Finish the frame: save r2-r8, LR, XER, soft-mask state; set the trap
 * number (low bit set to mark "regs not fully reloaded"), clear result,
 * load the kernel TOC and drop the STACK_FRAME_REGS_MARKER.
 */
#define EXCEPTION_PROLOG_COMMON_3(n)					   \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe   */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	mflr	r9;			/* Get LR, later save to stack	*/ \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	std	r9,_LINK(r1);						   \
	lbz	r10,PACAIRQSOFTMASK(r13);				   \
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r10,SOFTE(r1);						   \
	std	r11,_XER(r1);						   \
	li	r9,(n)+1;						   \
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							   \
	ld	r11,exception_marker@toc(r2);				   \
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/

/*
 * Exception vectors.
 */
#define STD_EXCEPTION(vec, label)				\
	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);

/* Version of above for when we have to branch out-of-line */
#define __OOL_EXCEPTION(vec, label, hdlr)			\
	SET_SCRATCH0(r13);					\
	EXCEPTION_PROLOG_0(PACA_EXGEN);				\
	b hdlr

#define STD_EXCEPTION_OOL(vec, label)				\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
	EXCEPTION_PROLOG_2 label, EXC_STD

/* Hypervisor-level variants; the loc parameter is unused */
#define STD_EXCEPTION_HV(loc, vec, label)			\
	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec)

#define STD_EXCEPTION_HV_OOL(vec, label)			\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
	EXCEPTION_PROLOG_2 label, EXC_HV

#define STD_RELON_EXCEPTION(loc, vec, label)		\
	/* No guest interrupts come through here */	\
	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec)

#define STD_RELON_EXCEPTION_OOL(vec, label)			\
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
	EXCEPTION_PROLOG_2_RELON label, EXC_STD

#define STD_RELON_EXCEPTION_HV(loc, vec, label)		\
	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec)

#define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
	EXCEPTION_PROLOG_2_RELON label, EXC_HV

/*
 * If the interrupt is soft-masked (paca->irq_soft_mask matches \bitmask),
 * load the corresponding PACA_IRQ_* "happened" bit into r10 and branch
 * to the masked_(H)interrupt handler.  Clobbers r10 and cr0.
 */
.macro SOFTEN_TEST hsrr, vec, bitmask
	lbz	r10, PACAIRQSOFTMASK(r13)
	andi.	r10, r10, \bitmask
	/* This associates vector numbers with bits in paca->irq_happened */
	.if \vec == 0x500 || \vec == 0xea0
	li	r10, PACA_IRQ_EE
	.elseif \vec == 0x900
	li	r10, PACA_IRQ_DEC
	.elseif \vec == 0xa00 || \vec == 0xe80
	li	r10, PACA_IRQ_DBELL
	.elseif \vec == 0xe60
	li	r10, PACA_IRQ_HMI
	.elseif \vec == 0xf00
	li	r10, PACA_IRQ_PMI
	.else
	.abort "Bad maskable vector"
	.endif

	.if \hsrr
	bne	masked_Hinterrupt
	.else
	bne	masked_interrupt
	.endif
.endm

#define SOFTEN_TEST_PR(vec, bitmask)		\
	KVMTEST EXC_STD, vec ;			\
	SOFTEN_TEST EXC_STD, vec, bitmask

#define SOFTEN_TEST_HV(vec, bitmask)		\
	KVMTEST EXC_HV, vec ;			\
	SOFTEN_TEST EXC_HV, vec, bitmask

#define KVMTEST_PR(vec)				\
	KVMTEST EXC_STD, vec

#define KVMTEST_HV(vec)				\
	KVMTEST EXC_HV, vec

/* Soft-mask test only, without the KVM guest check */
#define SOFTEN_NOTEST_PR(vec, bitmask)	SOFTEN_TEST EXC_STD, vec, bitmask
#define SOFTEN_NOTEST_HV(vec, bitmask)	SOFTEN_TEST EXC_HV, vec, bitmask

#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask)		\
	SET_SCRATCH0(r13);    /* save r13 */				\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
	EXCEPTION_PROLOG_2 label, h

#define MASKABLE_EXCEPTION(vec, label, bitmask)				\
	__MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)

#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask)			\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
	EXCEPTION_PROLOG_2 label, EXC_STD

#define MASKABLE_EXCEPTION_HV(vec, label, bitmask)			\
	__MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask)			\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
	EXCEPTION_PROLOG_2 label, EXC_HV

#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask)	\
	SET_SCRATCH0(r13);    /* save r13 */				\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
	EXCEPTION_PROLOG_2_RELON label, h

#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask)			\
	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)

#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask)		\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
	EXCEPTION_PROLOG_2 label, EXC_STD

#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask)		\
	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
	EXCEPTION_PROLOG_2_RELON label, EXC_HV

/*
 * Our exception common code can be passed various "additions"
 * to specify the behaviour of interrupts, whether to kick the
 * runlatch, etc...
 */

/*
 * This addition reconciles our actual IRQ state with the various software
 * flags that track it. This may call C code.
 */
#define ADD_RECONCILE	RECONCILE_IRQ_STATE(r10,r11)

/* Addition that also saves the non-volatile GPRs to the frame */
#define ADD_NVGPRS				\
	bl	save_nvgprs

/* Turn the runlatch on (if supported) unless it is already on */
#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	ld	r3, PACA_THREAD_INFO(r13);	\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

/*
 * Common second-level body: build the frame, run the additions, then
 * call hdlr(regs) and branch to ret.  The label parameter is unused.
 */
#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions)	\
	EXCEPTION_PROLOG_COMMON(trap, area);				\
	/* Volatile regs are potentially clobbered here */		\
	additions;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret

/*
 * Exception where stack is already set in r1, r1 is saved in r10, and it
 * continues rather than returns.
 */
#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
	EXCEPTION_PROLOG_COMMON_1();					\
	kuap_save_amr_and_lock r9, r10, cr1;				\
	EXCEPTION_PROLOG_COMMON_2(area);				\
	EXCEPTION_PROLOG_COMMON_3(trap);				\
	/* Volatile regs are potentially clobbered here */		\
	additions;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr

#define STD_EXCEPTION_COMMON(trap, label, hdlr)			\
	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
		ret_from_except, ADD_NVGPRS;ADD_RECONCILE)

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling
 * (finish nap and runlatch)
 */
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		\
	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
		ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	ld	r11, PACA_THREAD_INFO(r13);	\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_POWERPC_EXCEPTION_H */