/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
	/*
	 * First stage of syscall entry: on CPUs with CPU_FTR_REAL_LE,
	 * syscall number 0x1ebe is the fast endianness-switch call and is
	 * diverted to label 1 (in SYSCALL_PSERIES_3 below) without entering
	 * the kernel proper.  Otherwise save r13 in r9, pick up the PACA
	 * and the userspace return address from SRR0.
	 */
#define SYSCALL_PSERIES_1					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ;					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;					\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;					\
0:

	/*
	 * Second stage, real-mode (rfid) flavour: point SRR0/SRR1 at
	 * system_call_entry with the kernel MSR and take the rfid.
	 */
#define SYSCALL_PSERIES_2_RFID					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r10, system_call_entry) ;			\
	mtspr	SPRN_SRR0,r10 ;					\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

	/*
	 * Third stage: label 1 flips MSR_LE and returns straight to
	 * userspace (the fast LE/BE switch); label 2 is only reachable
	 * from kernel mode and reloads SRR0/SRR1/SDR1 from r3-r5.
	 */
#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;						\
2:	mfspr	r12,SPRN_SRR1 ;					\
	andi.	r12,r12,MSR_PR ;				\
	bne	0b ;						\
	mtspr	SPRN_SRR0,r3 ;					\
	mtspr	SPRN_SRR1,r4 ;					\
	mtspr	SPRN_SDR1,r5 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly; in the direct case we use LR
	 * and system_call_entry restores LR.  (We thus need to move
	 * LR to r10 in the RFID case too.)
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ;				\
	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */		\
	li	r13,MSR_RI ;					\
	mtmsrd	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd	r10,1 ;		/* Set RI (EE=0) */		\
	b	system_call_entry_direct ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	/* Isolate the SRR1 wakeup-reason field into r13; zero means this
	 * reset was not a powersave wakeup, so skip to 9f below.
	 */
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* Mark this hardware thread as in the kernel, then check whether
	 * KVM has requested it; if so, hand the thread to the guest.
	 */
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* cr1 still holds the compare of the wakeup reason against 2 from
	 * above: equal means no state loss, otherwise supervisor state was
	 * lost and the full wakeup path is needed.
	 */
	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
	b	machine_check_pSeries_0

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	/* On STAB-based (no-SLB) processors, first classify the fault in
	 * data_access_check_stab (out of line, below).
	 */
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR		/* r3 = faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* Same indirect-branch dance as the 0x380 vector: the distance to
	 * .slb_miss_realmode is not known at link time on relocatable
	 * kernels.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	HMT_MEDIUM_PPR_DISCARD
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/* Stash r9/r10 and CR so KVMTEST can decide whether this syscall
	 * was made from a guest; restore the original r13 afterwards.
	 */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv

	. = 0xe80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

facility_unavailable_trampoline:
	. = 0xf60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_pSeries

hv_facility_unavailable_trampoline:
	. = 0xf80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM_PPR_DISCARD
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r9,SPRN_SPRG_HSCRATCH0
	std	r9,PACA_EXGEN+EX_R13(r13)
	mfcr	r9

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	/* Build a fault classifier in r10 from the top nibble of DAR with
	 * a DSISR bit (and, under KVM PR, the in-guest flag) folded in;
	 * value 0x2c selects the bolted segment-table miss path below.
	 * NOTE(review): exact bit meanings come from the STAB layout --
	 * confirm against the segment table documentation.
	 */
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	/* Enable FP with both FE bits clear so the fmr copies below run
	 * without floating-point exceptions.
	 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

	/* Expand to "fmr fN,fN" for all 32 FP registers. */
#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

	/* Expand to "xvcpsgndp vN,vN,vN" (copy-to-self) for 32 VSX regs. */
#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	/* Restore everything saved by denorm_exception_hv and return to
	 * the (re-executed) faulting instruction via HRFID.
	 */
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	/* Instantiate both flavours: masked_interrupt (SRR/rfid) and
	 * masked_Hinterrupt (HSRR/hrfid).
	 */
	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:

	/* NOTE(review): DAR/DSISR are stashed in the PACA_EXGEN save area
	 * while the prolog itself uses PACA_EXMC -- presumably so they do
	 * not overlap the EXMC registers EXCEPTION_PROLOG_COMMON consumes;
	 * confirm EX_DAR/EX_DSISR in EXGEN are dead at this point.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest ; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
	. = 0x4380
	.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	/* Relocation-on syscall path: same first/third stages as the real
	 * mode entry, but the second stage branches directly (no rfid).
	 */
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

performance_monitor_relon_pSeries_1:
	. = 0x4f00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

altivec_unavailable_relon_pSeries_1:
	. = 0x4f20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

vsx_unavailable_relon_pSeries_1:
	. = 0x4f40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

facility_unavailable_relon_trampoline:
	. = 0x4f60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_pSeries

hv_facility_unavailable_relon_trampoline:
	. = 0x4f80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hv_facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

	/* Other future vectors */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

	.align	7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
	/* The first level prologue may have used LR to get here, saving
	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
	 */
	mtlr	r10
#endif
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	/* r3 points at the PACA exception save area; recover the original
	 * r3 and r9-r13 from there into the new stack frame.
	 */
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	/* For I-side faults the "DSISR" equivalent lives in the SRR1 high
	 * half; mask out the fault-status bits into r4 for do_hash_page.
	 */
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page	/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	.load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.fp_unavailable_tm
	b	.ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	.load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	/* NOTE(review): plain "b", not "bl", unlike the FP/Altivec paths
	 * above -- .load_up_vsx is presumably expected to return to the
	 * interrupted context itself; confirm against its definition.
	 */
	b	.load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	.
= 0x8000 1207#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 1208 1209/* Space for CPU0's segment table */ 1210 .balign 4096 1211 .globl initial_stab 1212initial_stab: 1213 .space 4096 1214 1215#ifdef CONFIG_PPC_POWERNV 1216_GLOBAL(opal_mc_secondary_handler) 1217 HMT_MEDIUM_PPR_DISCARD 1218 SET_SCRATCH0(r13) 1219 GET_PACA(r13) 1220 clrldi r3,r3,2 1221 tovirt(r3,r3) 1222 std r3,PACA_OPAL_MC_EVT(r13) 1223 ld r13,OPAL_MC_SRR0(r3) 1224 mtspr SPRN_SRR0,r13 1225 ld r13,OPAL_MC_SRR1(r3) 1226 mtspr SPRN_SRR1,r13 1227 ld r3,OPAL_MC_GPR3(r3) 1228 GET_SCRATCH0(r13) 1229 b machine_check_pSeries 1230#endif /* CONFIG_PPC_POWERNV */ 1231 1232 1233/* 1234 * r13 points to the PACA, r9 contains the saved CR, 1235 * r12 contain the saved SRR1, SRR0 is still ready for return 1236 * r3 has the faulting address 1237 * r9 - r13 are saved in paca->exslb. 1238 * r3 is saved in paca->slb_r3 1239 * We assume we aren't going to take any exceptions during this procedure. 1240 */ 1241_GLOBAL(slb_miss_realmode) 1242 mflr r10 1243#ifdef CONFIG_RELOCATABLE 1244 mtctr r11 1245#endif 1246 1247 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1248 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1249 1250 bl .slb_allocate_realmode 1251 1252 /* All done -- return from exception. */ 1253 1254 ld r10,PACA_EXSLB+EX_LR(r13) 1255 ld r3,PACA_EXSLB+EX_R3(r13) 1256 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1257 1258 mtlr r10 1259 1260 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1261 beq- 2f 1262 1263.machine push 1264.machine "power4" 1265 mtcrf 0x80,r9 1266 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 1267.machine pop 1268 1269 RESTORE_PPR_PACA(PACA_EXSLB, r9) 1270 ld r9,PACA_EXSLB+EX_R9(r13) 1271 ld r10,PACA_EXSLB+EX_R10(r13) 1272 ld r11,PACA_EXSLB+EX_R11(r13) 1273 ld r12,PACA_EXSLB+EX_R12(r13) 1274 ld r13,PACA_EXSLB+EX_R13(r13) 1275 rfid 1276 b . 
/* prevent speculative execution */ 1277 12782: mfspr r11,SPRN_SRR0 1279 ld r10,PACAKBASE(r13) 1280 LOAD_HANDLER(r10,unrecov_slb) 1281 mtspr SPRN_SRR0,r10 1282 ld r10,PACAKMSR(r13) 1283 mtspr SPRN_SRR1,r10 1284 rfid 1285 b . 1286 1287unrecov_slb: 1288 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) 1289 DISABLE_INTS 1290 bl .save_nvgprs 12911: addi r3,r1,STACK_FRAME_OVERHEAD 1292 bl .unrecoverable_exception 1293 b 1b 1294 1295 1296#ifdef CONFIG_PPC_970_NAP 1297power4_fixup_nap: 1298 andc r9,r9,r10 1299 std r9,TI_LOCAL_FLAGS(r11) 1300 ld r10,_LINK(r1) /* make idle task do the */ 1301 std r10,_NIP(r1) /* equivalent of a blr */ 1302 blr 1303#endif 1304 1305/* 1306 * Hash table stuff 1307 */ 1308 .align 7 1309_STATIC(do_hash_page) 1310 std r3,_DAR(r1) 1311 std r4,_DSISR(r1) 1312 1313 andis. r0,r4,0xa410 /* weird error? */ 1314 bne- handle_page_fault /* if not, try to insert a HPTE */ 1315 andis. r0,r4,DSISR_DABRMATCH@h 1316 bne- handle_dabr_fault 1317 1318BEGIN_FTR_SECTION 1319 andis. r0,r4,0x0020 /* Is it a segment table fault? */ 1320 bne- do_ste_alloc /* If so handle it */ 1321END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) 1322 1323 CURRENT_THREAD_INFO(r11, r1) 1324 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 1325 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ 1326 bne 77f /* then don't call hash_page now */ 1327 /* 1328 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are 1329 * accessing a userspace segment (even from the kernel). We assume 1330 * kernel addresses always have the high bit set. 
1331 */ 1332 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ 1333 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ 1334 orc r0,r12,r0 /* MSR_PR | ~high_bit */ 1335 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ 1336 ori r4,r4,1 /* add _PAGE_PRESENT */ 1337 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ 1338 1339 /* 1340 * r3 contains the faulting address 1341 * r4 contains the required access permissions 1342 * r5 contains the trap number 1343 * 1344 * at return r3 = 0 for success, 1 for page fault, negative for error 1345 */ 1346 bl .hash_page /* build HPTE if possible */ 1347 cmpdi r3,0 /* see if hash_page succeeded */ 1348 1349 /* Success */ 1350 beq fast_exc_return_irq /* Return from exception on success */ 1351 1352 /* Error */ 1353 blt- 13f 1354 1355/* Here we have a page fault that hash_page can't handle. */ 1356handle_page_fault: 135711: ld r4,_DAR(r1) 1358 ld r5,_DSISR(r1) 1359 addi r3,r1,STACK_FRAME_OVERHEAD 1360 bl .do_page_fault 1361 cmpdi r3,0 1362 beq+ 12f 1363 bl .save_nvgprs 1364 mr r5,r3 1365 addi r3,r1,STACK_FRAME_OVERHEAD 1366 lwz r4,_DAR(r1) 1367 bl .bad_page_fault 1368 b .ret_from_except 1369 1370/* We have a data breakpoint exception - handle it */ 1371handle_dabr_fault: 1372 bl .save_nvgprs 1373 ld r4,_DAR(r1) 1374 ld r5,_DSISR(r1) 1375 addi r3,r1,STACK_FRAME_OVERHEAD 1376 bl .do_break 137712: b .ret_from_except_lite 1378 1379 1380/* We have a page fault that hash_page could handle but HV refused 1381 * the PTE insertion 1382 */ 138313: bl .save_nvgprs 1384 mr r5,r3 1385 addi r3,r1,STACK_FRAME_OVERHEAD 1386 ld r4,_DAR(r1) 1387 bl .low_hash_fault 1388 b .ret_from_except 1389 1390/* 1391 * We come here as a result of a DSI at a point where we don't want 1392 * to call hash_page, such as when we are accessing memory (possibly 1393 * user memory) inside a PMU interrupt that occurred while interrupts 1394 * were soft-disabled. 
We want to invoke the exception handler for 1395 * the access, or panic if there isn't a handler. 1396 */ 139777: bl .save_nvgprs 1398 mr r4,r3 1399 addi r3,r1,STACK_FRAME_OVERHEAD 1400 li r5,SIGSEGV 1401 bl .bad_page_fault 1402 b .ret_from_except 1403 1404 /* here we have a segment miss */ 1405do_ste_alloc: 1406 bl .ste_allocate /* try to insert stab entry */ 1407 cmpdi r3,0 1408 bne- handle_page_fault 1409 b fast_exception_return 1410 1411/* 1412 * r13 points to the PACA, r9 contains the saved CR, 1413 * r11 and r12 contain the saved SRR0 and SRR1. 1414 * r9 - r13 are saved in paca->exslb. 1415 * We assume we aren't going to take any exceptions during this procedure. 1416 * We assume (DAR >> 60) == 0xc. 1417 */ 1418 .align 7 1419_GLOBAL(do_stab_bolted) 1420 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1421 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1422 mfspr r11,SPRN_DAR /* ea */ 1423 1424 /* 1425 * check for bad kernel/user address 1426 * (ea & ~REGION_MASK) >= PGTABLE_RANGE 1427 */ 1428 rldicr. r9,r11,4,(63 - 46 - 4) 1429 li r9,0 /* VSID = 0 for bad address */ 1430 bne- 0f 1431 1432 /* 1433 * Calculate VSID: 1434 * This is the kernel vsid, we take the top for context from 1435 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 1436 * Here we know that (ea >> 60) == 0xc 1437 */ 1438 lis r9,(MAX_USER_CONTEXT + 1)@ha 1439 addi r9,r9,(MAX_USER_CONTEXT + 1)@l 1440 1441 srdi r10,r11,SID_SHIFT 1442 rldimi r10,r9,ESID_BITS,0 /* proto vsid */ 1443 ASM_VSID_SCRAMBLE(r10, r9, 256M) 1444 rldic r9,r10,12,16 /* r9 = vsid << 12 */ 1445 14460: 1447 /* Hash to the primary group */ 1448 ld r10,PACASTABVIRT(r13) 1449 srdi r11,r11,SID_SHIFT 1450 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1451 1452 /* Search the primary group for a free entry */ 14531: ld r11,0(r10) /* Test valid bit of the current ste */ 1454 andi. r11,r11,0x80 1455 beq 2f 1456 addi r10,r10,16 1457 andi. 
r11,r10,0x70 1458 bne 1b 1459 1460 /* Stick for only searching the primary group for now. */ 1461 /* At least for now, we use a very simple random castout scheme */ 1462 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ 1463 mftb r11 1464 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ 1465 ori r11,r11,0x10 1466 1467 /* r10 currently points to an ste one past the group of interest */ 1468 /* make it point to the randomly selected entry */ 1469 subi r10,r10,128 1470 or r10,r10,r11 /* r10 is the entry to invalidate */ 1471 1472 isync /* mark the entry invalid */ 1473 ld r11,0(r10) 1474 rldicl r11,r11,56,1 /* clear the valid bit */ 1475 rotldi r11,r11,8 1476 std r11,0(r10) 1477 sync 1478 1479 clrrdi r11,r11,28 /* Get the esid part of the ste */ 1480 slbie r11 1481 14822: std r9,8(r10) /* Store the vsid part of the ste */ 1483 eieio 1484 1485 mfspr r11,SPRN_DAR /* Get the new esid */ 1486 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1487 ori r11,r11,0x90 /* Turn on valid and kp */ 1488 std r11,0(r10) /* Put new entry back into the stab */ 1489 1490 sync 1491 1492 /* All done -- return from exception. */ 1493 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1494 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ 1495 1496 andi. r10,r12,MSR_RI 1497 beq- unrecov_slb 1498 1499 mtcrf 0x80,r9 /* restore CR */ 1500 1501 mfmsr r10 1502 clrrdi r10,r10,2 1503 mtmsrd r10,1 1504 1505 mtspr SPRN_SRR0,r11 1506 mtspr SPRN_SRR1,r12 1507 ld r9,PACA_EXSLB+EX_R9(r13) 1508 ld r10,PACA_EXSLB+EX_R10(r13) 1509 ld r11,PACA_EXSLB+EX_R11(r13) 1510 ld r12,PACA_EXSLB+EX_R12(r13) 1511 ld r13,PACA_EXSLB+EX_R13(r13) 1512 rfid 1513 b . /* prevent speculative execution */ 1514