/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
	/* 0x1ebe is the "fast endian switch" magic syscall number (see the
	 * CPU_FTR_REAL_LE fast path in SYSCALL_PSERIES_3 below). */
#define SYSCALL_PSERIES_1					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ;					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;					\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;					\
0:

#define SYSCALL_PSERIES_2_RFID					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r10, system_call_entry) ;			\
	mtspr	SPRN_SRR0,r10 ;					\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;						\
2:	mfspr	r12,SPRN_SRR1 ;					\
	andi.	r12,r12,MSR_PR ;				\
	bne	0b ;						\
	mtspr	SPRN_SRR0,r3 ;					\
	mtspr	SPRN_SRR1,r4 ;					\
	mtspr	SPRN_SDR1,r5 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly; in the direct case we use LR
	 * and system_call_entry restores LR.  (We thus need to move
	 * LR to r10 in the RFID case too.)
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */		\
	li	r13,MSR_RI ;					\
	mtmsrd	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_entry_direct ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31	/* extract SRR1 wakeup-reason field */
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
	b	machine_check_pSeries_0

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	HMT_MEDIUM_PPR_DISCARD
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_data_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
hv_instr_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
emulation_assist_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv

	. = 0xe80
hv_doorbell_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
performance_monitor_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

	. = 0xf20
altivec_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

	. = 0xf40
vsx_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

	. = 0xf60
facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_pSeries

	. = 0xf80
hv_facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM_PPR_DISCARD
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n)  XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n)  XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n)  XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0		/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1		/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.			/* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:

	/* NOTE(review): DAR/DSISR are staged in PACA_EXGEN here even though
	 * the prolog below uses PACA_EXMC — presumably because EXGEN is free
	 * at this point and EXMC is consumed by the prolog; confirm against
	 * EXCEPTION_PROLOG_COMMON before changing.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest ; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
	. = 0x4380
	.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
emulation_assist_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
h_doorbell_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

	. = 0x4f00
performance_monitor_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

	. = 0x4f20
altivec_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

	. = 0x4f40
vsx_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

	. = 0x4f60
facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_pSeries

	. = 0x4f80
hv_facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hv_facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

	/* Other future vectors */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

	.align	7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
	/* The first level prologue may have used LR to get here, saving
	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
	 */
	mtlr	r10
#endif
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align  7
	.globl  h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	.load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.fp_unavailable_tm
	b	.ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	.load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	.load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	/* NOTE(review): source chunk appears truncated here — the trailing
	 * location-counter assignment is incomplete in the extracted text. */
	.
= 0x8000 1240#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 1241 1242/* Space for CPU0's segment table */ 1243 .balign 4096 1244 .globl initial_stab 1245initial_stab: 1246 .space 4096 1247 1248#ifdef CONFIG_PPC_POWERNV 1249_GLOBAL(opal_mc_secondary_handler) 1250 HMT_MEDIUM_PPR_DISCARD 1251 SET_SCRATCH0(r13) 1252 GET_PACA(r13) 1253 clrldi r3,r3,2 1254 tovirt(r3,r3) 1255 std r3,PACA_OPAL_MC_EVT(r13) 1256 ld r13,OPAL_MC_SRR0(r3) 1257 mtspr SPRN_SRR0,r13 1258 ld r13,OPAL_MC_SRR1(r3) 1259 mtspr SPRN_SRR1,r13 1260 ld r3,OPAL_MC_GPR3(r3) 1261 GET_SCRATCH0(r13) 1262 b machine_check_pSeries 1263#endif /* CONFIG_PPC_POWERNV */ 1264 1265 1266/* 1267 * r13 points to the PACA, r9 contains the saved CR, 1268 * r12 contain the saved SRR1, SRR0 is still ready for return 1269 * r3 has the faulting address 1270 * r9 - r13 are saved in paca->exslb. 1271 * r3 is saved in paca->slb_r3 1272 * We assume we aren't going to take any exceptions during this procedure. 1273 */ 1274_GLOBAL(slb_miss_realmode) 1275 mflr r10 1276#ifdef CONFIG_RELOCATABLE 1277 mtctr r11 1278#endif 1279 1280 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1281 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1282 1283 bl .slb_allocate_realmode 1284 1285 /* All done -- return from exception. */ 1286 1287 ld r10,PACA_EXSLB+EX_LR(r13) 1288 ld r3,PACA_EXSLB+EX_R3(r13) 1289 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1290 1291 mtlr r10 1292 1293 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1294 beq- 2f 1295 1296.machine push 1297.machine "power4" 1298 mtcrf 0x80,r9 1299 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 1300.machine pop 1301 1302 RESTORE_PPR_PACA(PACA_EXSLB, r9) 1303 ld r9,PACA_EXSLB+EX_R9(r13) 1304 ld r10,PACA_EXSLB+EX_R10(r13) 1305 ld r11,PACA_EXSLB+EX_R11(r13) 1306 ld r12,PACA_EXSLB+EX_R12(r13) 1307 ld r13,PACA_EXSLB+EX_R13(r13) 1308 rfid 1309 b . 
/* prevent speculative execution */ 1310 13112: mfspr r11,SPRN_SRR0 1312 ld r10,PACAKBASE(r13) 1313 LOAD_HANDLER(r10,unrecov_slb) 1314 mtspr SPRN_SRR0,r10 1315 ld r10,PACAKMSR(r13) 1316 mtspr SPRN_SRR1,r10 1317 rfid 1318 b . 1319 1320unrecov_slb: 1321 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) 1322 DISABLE_INTS 1323 bl .save_nvgprs 13241: addi r3,r1,STACK_FRAME_OVERHEAD 1325 bl .unrecoverable_exception 1326 b 1b 1327 1328 1329#ifdef CONFIG_PPC_970_NAP 1330power4_fixup_nap: 1331 andc r9,r9,r10 1332 std r9,TI_LOCAL_FLAGS(r11) 1333 ld r10,_LINK(r1) /* make idle task do the */ 1334 std r10,_NIP(r1) /* equivalent of a blr */ 1335 blr 1336#endif 1337 1338/* 1339 * Hash table stuff 1340 */ 1341 .align 7 1342_STATIC(do_hash_page) 1343 std r3,_DAR(r1) 1344 std r4,_DSISR(r1) 1345 1346 andis. r0,r4,0xa410 /* weird error? */ 1347 bne- handle_page_fault /* if not, try to insert a HPTE */ 1348 andis. r0,r4,DSISR_DABRMATCH@h 1349 bne- handle_dabr_fault 1350 1351BEGIN_FTR_SECTION 1352 andis. r0,r4,0x0020 /* Is it a segment table fault? */ 1353 bne- do_ste_alloc /* If so handle it */ 1354END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) 1355 1356 CURRENT_THREAD_INFO(r11, r1) 1357 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 1358 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ 1359 bne 77f /* then don't call hash_page now */ 1360 /* 1361 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are 1362 * accessing a userspace segment (even from the kernel). We assume 1363 * kernel addresses always have the high bit set. 
1364 */ 1365 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ 1366 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ 1367 orc r0,r12,r0 /* MSR_PR | ~high_bit */ 1368 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ 1369 ori r4,r4,1 /* add _PAGE_PRESENT */ 1370 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ 1371 1372 /* 1373 * r3 contains the faulting address 1374 * r4 contains the required access permissions 1375 * r5 contains the trap number 1376 * 1377 * at return r3 = 0 for success, 1 for page fault, negative for error 1378 */ 1379 bl .hash_page /* build HPTE if possible */ 1380 cmpdi r3,0 /* see if hash_page succeeded */ 1381 1382 /* Success */ 1383 beq fast_exc_return_irq /* Return from exception on success */ 1384 1385 /* Error */ 1386 blt- 13f 1387 1388/* Here we have a page fault that hash_page can't handle. */ 1389handle_page_fault: 139011: ld r4,_DAR(r1) 1391 ld r5,_DSISR(r1) 1392 addi r3,r1,STACK_FRAME_OVERHEAD 1393 bl .do_page_fault 1394 cmpdi r3,0 1395 beq+ 12f 1396 bl .save_nvgprs 1397 mr r5,r3 1398 addi r3,r1,STACK_FRAME_OVERHEAD 1399 lwz r4,_DAR(r1) 1400 bl .bad_page_fault 1401 b .ret_from_except 1402 1403/* We have a data breakpoint exception - handle it */ 1404handle_dabr_fault: 1405 bl .save_nvgprs 1406 ld r4,_DAR(r1) 1407 ld r5,_DSISR(r1) 1408 addi r3,r1,STACK_FRAME_OVERHEAD 1409 bl .do_break 141012: b .ret_from_except_lite 1411 1412 1413/* We have a page fault that hash_page could handle but HV refused 1414 * the PTE insertion 1415 */ 141613: bl .save_nvgprs 1417 mr r5,r3 1418 addi r3,r1,STACK_FRAME_OVERHEAD 1419 ld r4,_DAR(r1) 1420 bl .low_hash_fault 1421 b .ret_from_except 1422 1423/* 1424 * We come here as a result of a DSI at a point where we don't want 1425 * to call hash_page, such as when we are accessing memory (possibly 1426 * user memory) inside a PMU interrupt that occurred while interrupts 1427 * were soft-disabled. 
We want to invoke the exception handler for 1428 * the access, or panic if there isn't a handler. 1429 */ 143077: bl .save_nvgprs 1431 mr r4,r3 1432 addi r3,r1,STACK_FRAME_OVERHEAD 1433 li r5,SIGSEGV 1434 bl .bad_page_fault 1435 b .ret_from_except 1436 1437 /* here we have a segment miss */ 1438do_ste_alloc: 1439 bl .ste_allocate /* try to insert stab entry */ 1440 cmpdi r3,0 1441 bne- handle_page_fault 1442 b fast_exception_return 1443 1444/* 1445 * r13 points to the PACA, r9 contains the saved CR, 1446 * r11 and r12 contain the saved SRR0 and SRR1. 1447 * r9 - r13 are saved in paca->exslb. 1448 * We assume we aren't going to take any exceptions during this procedure. 1449 * We assume (DAR >> 60) == 0xc. 1450 */ 1451 .align 7 1452_GLOBAL(do_stab_bolted) 1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1455 mfspr r11,SPRN_DAR /* ea */ 1456 1457 /* 1458 * check for bad kernel/user address 1459 * (ea & ~REGION_MASK) >= PGTABLE_RANGE 1460 */ 1461 rldicr. r9,r11,4,(63 - 46 - 4) 1462 li r9,0 /* VSID = 0 for bad address */ 1463 bne- 0f 1464 1465 /* 1466 * Calculate VSID: 1467 * This is the kernel vsid, we take the top for context from 1468 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 1469 * Here we know that (ea >> 60) == 0xc 1470 */ 1471 lis r9,(MAX_USER_CONTEXT + 1)@ha 1472 addi r9,r9,(MAX_USER_CONTEXT + 1)@l 1473 1474 srdi r10,r11,SID_SHIFT 1475 rldimi r10,r9,ESID_BITS,0 /* proto vsid */ 1476 ASM_VSID_SCRAMBLE(r10, r9, 256M) 1477 rldic r9,r10,12,16 /* r9 = vsid << 12 */ 1478 14790: 1480 /* Hash to the primary group */ 1481 ld r10,PACASTABVIRT(r13) 1482 srdi r11,r11,SID_SHIFT 1483 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1484 1485 /* Search the primary group for a free entry */ 14861: ld r11,0(r10) /* Test valid bit of the current ste */ 1487 andi. r11,r11,0x80 1488 beq 2f 1489 addi r10,r10,16 1490 andi. 
r11,r10,0x70 1491 bne 1b 1492 1493 /* Stick for only searching the primary group for now. */ 1494 /* At least for now, we use a very simple random castout scheme */ 1495 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ 1496 mftb r11 1497 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ 1498 ori r11,r11,0x10 1499 1500 /* r10 currently points to an ste one past the group of interest */ 1501 /* make it point to the randomly selected entry */ 1502 subi r10,r10,128 1503 or r10,r10,r11 /* r10 is the entry to invalidate */ 1504 1505 isync /* mark the entry invalid */ 1506 ld r11,0(r10) 1507 rldicl r11,r11,56,1 /* clear the valid bit */ 1508 rotldi r11,r11,8 1509 std r11,0(r10) 1510 sync 1511 1512 clrrdi r11,r11,28 /* Get the esid part of the ste */ 1513 slbie r11 1514 15152: std r9,8(r10) /* Store the vsid part of the ste */ 1516 eieio 1517 1518 mfspr r11,SPRN_DAR /* Get the new esid */ 1519 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1520 ori r11,r11,0x90 /* Turn on valid and kp */ 1521 std r11,0(r10) /* Put new entry back into the stab */ 1522 1523 sync 1524 1525 /* All done -- return from exception. */ 1526 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1527 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ 1528 1529 andi. r10,r12,MSR_RI 1530 beq- unrecov_slb 1531 1532 mtcrf 0x80,r9 /* restore CR */ 1533 1534 mfmsr r10 1535 clrrdi r10,r10,2 1536 mtmsrd r10,1 1537 1538 mtspr SPRN_SRR0,r11 1539 mtspr SPRN_SRR1,r12 1540 ld r9,PACA_EXSLB+EX_R9(r13) 1541 ld r10,PACA_EXSLB+EX_R10(r13) 1542 ld r11,PACA_EXSLB+EX_R11(r13) 1543 ld r12,PACA_EXSLB+EX_R12(r13) 1544 ld r13,PACA_EXSLB+EX_R13(r13) 1545 rfid 1546 b . /* prevent speculative execution */ 1547