/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	/*
	 * Machine check (0x200): stash r13 in the scratch SPRG and enter
	 * the common handler through the dedicated PACA_EXMC save area.
	 */
	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	DO_KVM	0x200
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	DO_KVM	0x300
	mtspr	SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
	/*
	 * Segment-table (no-SLB) CPUs: decide between the bolted-STAB
	 * fast path and the normal data access path.  Merge (DAR >> 60)
	 * with the segment-fault bit extracted from DSISR and compare
	 * against 0x2c, i.e. a segment fault on a 0xC... (kernel linear
	 * mapping) address -- see the matching assumption documented at
	 * do_stab_bolted.
	 */
	mfspr	r13,SPRN_SPRG_PACA
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60			/* r10 = DAR >> 60 */
	rlwimi	r10,r9,16,0x20			/* fold in DSISR segment-fault bit */
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	/*
	 * Not a bolted-STAB fault: migrate the registers saved above in
	 * PACA_EXSLB over to PACA_EXGEN, then take the normal data
	 * access path.
	 */
	ld	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r12,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x380
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x480
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* Same relocatable-kernel indirection as the 0x380 vector above. */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	DO_KVM	0xc00
BEGIN_FTR_SECTION
	/* r0 == 0x1ebe selects the fast little-endian switch below */
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	/*
	 * Point SRR0/SRR1 at system_call_entry with the kernel MSR
	 * (PACAKMSR) so rfid enters the kernel; the user's SRR0/SRR1
	 * are carried across in r11/r12.
	 */
	mr	r9,r13
	mfspr	r13,SPRN_SPRG_PACA
	mfspr	r11,SPRN_SRR0
	ld	r12,PACAKBASE(r13)
	ld	r10,PACAKMSR(r13)
	LOAD_HANDLER(r12, system_call_entry)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE		/* toggle endianness bit only */
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.
	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	DO_KVM	0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	DO_KVM	0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	DO_KVM	0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)
	STD_EXCEPTION_PSERIES(., altivec_unavailable)
	STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG_SCRATCH0
	rfid
	b	.

	/*
	 * Continuation of the 0x300 bolted-STAB fast path: finish saving
	 * r11/r12/r13 into PACA_EXSLB, then enter .do_stab_bolted.
	 */
	.align	7
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG_SCRATCH0
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

/* KVM's trampoline code needs to be close to the interrupt handlers */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/* Trampoline reached from the 0xc00 vector via LOAD_HANDLER. */
	.align	7
system_call_entry:
	b	system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)		/* terminate the back chain */
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* kernel_bad_stack should not return */

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)		/* faulting address is the NIP here */
	andis.	r4,r12,0x5820		/* fault bits from SRR1, DSISR-style */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	/* restore CTR, clobbered by the bctr stub in the SLB-miss prologs */
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* iSeries keeps the return SRR0/SRR1 in the lppaca, not the SPRs */
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
	/* MSR_RI was clear: route to unrecov_slb (directly on iSeries,
	 * via SRR0/SRR1 + rfid otherwise). */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	/* External interrupt (0x500) common handler: hand off to do_IRQ. */
	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
	/*
	 * Patch up the saved state so that the interrupted nap loop
	 * returns via its own LR: NIP is replaced with the saved LINK.
	 */
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* from kernel: report, don't load */
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx		/* from user: just load VSX up */
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR		/* only account on return to user */
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	b	11f

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .do_dabr
	b       .ret_from_except_lite

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70	/* wrapped past the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
	/* pad the interrupt/handler region out to 0x8000 (early init follows) */
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */