/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ;					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;					\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;					\
0:

#define SYSCALL_PSERIES_2_RFID					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r10, system_call_entry) ;			\
	mtspr	SPRN_SRR0,r10 ;					\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly; in the direct case we use LR
	 * and system_call_entry restores LR.  (We thus need to move
	 * LR to r10 in the RFID case too.)
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */		\
	li	r13,MSR_RI ;					\
	mtmsrd	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd	r10,1 ;		/* Set RI (EE=0) */		\
	b	system_call_entry_direct ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
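	/* The rlwinm above leaves the SRR1 wake-state field (SRR1[46:47])
	 * in the low two bits of r13.  Roughly: 0 = not a power-saving
	 * wakeup, 1 = no state lost, 2 = supervisor state lost,
	 * 3 = hypervisor state lost; the compares below branch on these.
	 */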
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * OPAL v3 based powernv platforms have new idle states
	 * which fall in this category.
	 */
	bgt	cr1,8f
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss

	/* Fast Sleep wakeup on PowerNV */
8:	GET_PACA(r13)
	b	.power7_wakeup_tb_loss

9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	beq	9f

	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal. Let's just stay stuck here */
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	bgt	cr1,.
9:
	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_pSeries_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	HMT_MEDIUM_PPR_DISCARD
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_data_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
hv_instr_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
emulation_assist_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv

	. = 0xe80
hv_doorbell_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv
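
	/* Note: the 0xe00/0xe20/... vectors above are only 0x20 bytes
	 * apart, so there is room for little more than stashing r13 and
	 * branching; the real prologs live out of line below (see the
	 * "moved from 0xe00" block).
	 */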

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
performance_monitor_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

	. = 0xf20
altivec_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

	. = 0xf40
vsx_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

	. = 0xf60
facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_pSeries

	. = 0xf80
hv_facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM_PPR_DISCARD
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries_early:
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (though we
	 * currently don't test for overflow). Save MCE registers srr1,
	 * srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	ld	r12,PACAKBASE(r13)	/* get high part of &label */
	LOAD_HANDLER(r12, machine_check_handle_early)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.			/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n)  XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n)  XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n)  XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)
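
	/* The FMRn/XVCPSGNDPn helpers above are doubling macros:
	 * FMR32(0) expands to "fmr 0,0 ; fmr 1,1 ; ... ; fmr 31,31" and
	 * XVCPSGNDP32(0) to the equivalent xvcpsgndp over VSR0-VSR31,
	 * touching each register exactly once.
	 */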

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)
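
	/* The two expansions above generate masked_interrupt (SRR0/SRR1,
	 * returning with rfid) and masked_Hinterrupt (HSRR0/HSRR1,
	 * returning with hrfid) via the ##_H## token pasting in the macro.
	 */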

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen currently but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */
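/* (ori takes a 16-bit unsigned immediate, hence the 64k limit above.) */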

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
	. = 0x4380
	.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
emulation_assist_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
h_doorbell_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

	. = 0x4f00
performance_monitor_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

	. = 0x4f20
altivec_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

	. = 0x4f40
vsx_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

	. = 0x4f60
facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_pSeries

	. = 0x4f80
hv_facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hv_facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

	/* Other future vectors */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

	.align	7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
	/* The first level prologue may have used LR to get here, saving
	 * its original value in r10.  To save hacking/ifdeffing common
	 * code, restore it here.
	 */
	mtlr	r10
#endif
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl  h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
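	/* The andis. above masks out of SRR1 the instruction-storage
	 * status bits, giving a DSISR-style fault code in r4 for
	 * do_hash_page (the data access path passes the real DSISR here).
	 */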
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss handler that is used when going to virtual
 * mode for SLB misses; it is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:

	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	.load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.fp_unavailable_tm
	b	.ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	.load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	.load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign	4096
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */


#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)

	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
	.align	7
	.globl machine_check_handle_early
machine_check_handle_early:
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_early
	ld	r12,_MSR(r1)
#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss or
	 *    supervisor state loss
	 *
	 * Go back to nap again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
	bne	3f
	/* Supervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
3:	bl	.machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
	b	.power7_enter_nap_mode
4:
#endif
	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HV
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
2:
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	.machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
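	/* Rough C sketch of what the bit twiddling below computes
	 * (names as used in the comments; the exact bit values come
	 * from the rlwinm/rlwimi masks):
	 *   access  = _PAGE_PRESENT;
	 *   if (dsisr & DSISR_STORE)            access |= _PAGE_RW;
	 *   if ((msr & MSR_PR) || !(ea >> 63))  access |= _PAGE_USER;
	 *   if (trap == 0x400)                  access |= _PAGE_EXEC;
	 */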
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_break
12:	b	.ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
	mfspr	r11,SPRN_DAR			/* ea */

	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr.	r9,r11,4,(63 - 46 - 4)
	li	r9,0	/* VSID = 0 for bad address */
	bne-	0f

	/*
	 * Calculate VSID:
	 * This is the kernel vsid, we take the top for context from
	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * Here we know that (ea >> 60) == 0xc
	 */
	lis	r9,(MAX_USER_CONTEXT + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l

	srdi	r10,r11,SID_SHIFT
	rldimi	r10,r9,ESID_BITS,0	/* proto vsid */
	ASM_VSID_SCRAMBLE(r10, r9, 256M)
	rldic	r9,r10,12,16		/* r9 = vsid << 12 */

0:
	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	srdi	r11,r11,SID_SHIFT
	rldimi	r10,r11,7,52		/* r10 = first ste of the group */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)		/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57		/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11		/* r10 is the entry to invalidate */

	isync				/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1		/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28		/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)		/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28		/* Permits a full 32b of ESID */
	ori	r11,r11,0x90		/* Turn on valid and kp */
	std	r11,0(r10)		/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */