/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * PACA save area offsets (exgen, exmc, etc).
 *
 * Byte offsets into the per-interrupt register save areas in the paca.
 * EX_DSISR and EX_CCR are 4-byte slots packed into a single doubleword;
 * all other slots are 8 bytes. EX_CTR at offset 72 makes the whole area
 * 80 bytes = 10 doublewords, which the guard below checks.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#define EX_CTR		72
/* Keep in sync with EX_SIZE (presumably from the paca/exception headers — confirm) */
.if EX_SIZE != 10
	.error "EX_SIZE is wrong"
.endif

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

/*
 * A common (relocated, virtual mode) handler: aligned for instruction
 * fetch, made global, excluded from kprobes, and given a fixed symbol so
 * LOAD_HANDLER can reach it.
 */
#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

/*
 * Mark a vector range unused: open and immediately close the fixed
 * section entry so the space stays accounted for in the layout.
 */
#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h

/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_FAR_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

/*
 * Interrupt code generation macros.
 *
 * Each interrupt's properties are stored in per-handler .L_* assembler
 * symbols; the shorthands below expand to the symbol for the \name
 * currently being generated.
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_SKIP	.L_IKVM_SKIP_\name\()	/* Generate KVM skip handler */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IRECONCILE	.L_IRECONCILE_\name\()	/* Do RECONCILE_IRQ_STATE */
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

/*
 * INT_DEFINE_BEGIN/END wrap the flag assignments in a temporary assembler
 * macro (int_define_<n>); END immediately invokes it with \name bound to
 * the handler name, then do_define_int fills in defaults.
 */
#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

/* Supply defaults for any .L_* flag the INT_DEFINE block left unset */
.macro do_define_int name
	.ifndef IVEC
	.error "IVEC not defined"
	.endif
	.ifndef IHSRR
	IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
	IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
	IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
	IVIRT=1
	.endif
	.ifndef IISIDE
	IISIDE=0
	.endif
	.ifndef IDAR
	IDAR=0
	.endif
	.ifndef IDSISR
	IDSISR=0
	.endif
	.ifndef ISET_RI
	ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
	IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
	IREALMODE_COMMON=0
	.else
	.if ! IBRANCH_TO_COMMON
	.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
	.endif
	.endif
	.ifndef IMASK
	IMASK=0
	.endif
	.ifndef IKVM_SKIP
	IKVM_SKIP=0
	.endif
	.ifndef IKVM_REAL
	IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
	IKVM_VIRT=0
	.endif
	.ifndef ISTACK
	ISTACK=1
	.endif
	.ifndef IRECONCILE
	IRECONCILE=1
	.endif
	.ifndef IKUAP
	IKUAP=1
	.endif
.endm

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go first
 * to kvmppc_interrupt_hv, which handles the PR guest case.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

/*
 * Branch to the handler's _kvm entry point if the CPU is currently in
 * guest context (paca->kvm_hstate.in_guest != 0). Clobbers r10.
 */
.macro KVMTEST name
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	bne	\name\()_kvm
.endm

/*
 * Emit the \name\()_kvm branch target: reload r9-r12 from the save area,
 * assemble the trap number into r12 (high half holds the saved CR), and
 * hand off to kvmppc_interrupt. For IKVM_SKIP handlers taken in
 * KVM_GUEST_MODE_SKIP, restore everything and branch to the skip handler
 * instead.
 */
.macro GEN_KVM name
	.balign IFETCH_ALIGN_BYTES
\name\()_kvm:

	.if IKVM_SKIP
	cmpwi	r10,KVM_GUEST_MODE_SKIP
	beq	89f
	.else
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,HSTATE_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif

	/*
	 * NOTE(review): PACA_EXGEN is used here while the rest of this macro
	 * uses IAREA+...; for handlers where IAREA != PACA_EXGEN (e.g.
	 * machine check uses PACA_EXMC) this looks like it restores ctr from
	 * the wrong save area — confirm.
	 */
	ld	r10,PACA_EXGEN+EX_CTR(r13)
	mtctr	r10
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	ori	r12,r12,(IVEC + 0x2)
	FTR_SECTION_ELSE
	ori	r12,r12,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	ori	r12,r12,(IVEC + 0x2)
	.else
	ori	r12,r12,(IVEC)
	.endif
	b	kvmppc_interrupt

	.if IKVM_SKIP
89:	mtocrf	0x80,r9
	ld	r10,PACA_EXGEN+EX_CTR(r13)
	mtctr	r10
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	b	kvmppc_skip_Hinterrupt
	FTR_SECTION_ELSE
	b	kvmppc_skip_interrupt
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	b	kvmppc_skip_Hinterrupt
	.else
	b	kvmppc_skip_interrupt
	.endif
	.endif
.endm

#else
/* No KVM handler built in: the tests compile away to nothing */
.macro KVMTEST name
.endm
.macro GEN_KVM name
.endm
#endif

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

/*
 * Branch from the entry stub to the _common handler. Realmode-common
 * handlers always go to _common_real. Otherwise the virt entry branches
 * to _common_virt (directly when !RELOCATABLE, else via ctr) and the real
 * entry goes to _common_real via ctr. Clobbers r10 and ctr.
 */
.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

/*
 * Interrupt entry prolog, emitted at the (real or virt) vector. Saves
 * r9-r13, PPR/CFAR/ctr and (as configured) DAR/DSISR into the IAREA save
 * area, loads SRR0/1 or HSRR0/1 into r11/r12, then branches to the common
 * handler if IBRANCH_TO_COMMON. With ool=1 only a branch fits in the
 * vector and the body is emitted in a trampoline section.
 */
.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

/* r9 holds PPR and r10 holds CFAR from above (when the CPU has them) */
BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
	KVMTEST \name
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR == EXC_HV_OR_STD
	/*
	 * NOTE(review): other sites in this file spell this case
	 * ".if IHSRR_IF_HVMODE"; EXC_HV_OR_STD is not defined anywhere in
	 * this file — confirm a header still provides it and that it means
	 * the same thing.
	 */
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
	KVMTEST \name
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
	KVMTEST \name
	.endif
.endm

/*
 * Build the pt_regs frame: branch out to the masked handler if the
 * interrupt is soft-masked, switch to the kernel stack (if ISTACK), and
 * spill all GPRs and SPR images from the IAREA save area into the frame.
 */
.macro __GEN_COMMON_BODY name
	.if IMASK
	lbz	r10,PACAIRQSOFTMASK(r13)
	andi.	r10,r10,IMASK
	/* Associate vector numbers with bits in paca->irq_happened */
	.if IVEC == 0x500 || IVEC == 0xea0
	li	r10,PACA_IRQ_EE
	.elseif IVEC == 0x900
	li	r10,PACA_IRQ_DEC
	.elseif IVEC == 0xa00 || IVEC == 0xe80
	li	r10,PACA_IRQ_DBELL
	.elseif IVEC == 0xe60
	li	r10,PACA_IRQ_HMI
	.elseif IVEC == 0xf00
	li	r10,PACA_IRQ_PMI
	.else
	.abort "Bad maskable vector"
	.endif

	/* CR still holds the andi. result; li does not disturb it */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	bne	masked_Hinterrupt
	FTR_SECTION_ELSE
	bne	masked_interrupt
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	bne	masked_Hinterrupt
	.else
	bne	masked_interrupt
	.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user */
	mr	r10,r1			/* Save r1 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack */
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use */
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe */
	std	r11,_NIP(r1)		/* save SRR0 in stackframe */
	std	r12,_MSR(r1)		/* save SRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca */
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe */
	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe */
	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe */
	mflr	r9			/* Get LR, later save to stack */
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2 */
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe */
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number */
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result */
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */

	.if ISTACK
	ACCOUNT_STOLEN_TIME
	.endif

	.if IRECONCILE
	RECONCILE_IRQ_STATE(r10, r11)
	.endif
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 *
 * r9 is used as scratch throughout; its saved value is put back last by
 * REST_8GPRS(2, r1), which restores r2-r9.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	.else
	mtspr	SPRN_SRR1,r9
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	ld	r3, PACA_THREAD_INFO(r13);	\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	ld	r11, PACA_THREAD_INFO(r13);	\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
723 * - Fixed location code must not call directly beyond the __end_interrupts 724 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence 725 * must be used. 726 * - LOAD_HANDLER targets must be within first 64K of physical 0 / 727 * virtual 0xc00... 728 * - Conditional branch targets must be within +/-32K of caller. 729 * 730 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and 731 * therefore don't have to run in physically located code or rfid to 732 * virtual mode kernel code. However on relocatable kernels they do have 733 * to branch to KERNELBASE offset because the rest of the kernel (outside 734 * the exception vectors) may be located elsewhere. 735 * 736 * Virtual exceptions correspond with physical, except their entry points 737 * are offset by 0xc000000000000000 and also tend to get an added 0x4000 738 * offset applied. Virtual exceptions are enabled with the Alternate 739 * Interrupt Location (AIL) bit set in the LPCR. However this does not 740 * guarantee they will be delivered virtually. Some conditions (see the ISA) 741 * cause exceptions to be delivered in real mode. 742 * 743 * It's impossible to receive interrupts below 0x300 via AIL. 744 * 745 * KVM: None of the virtual exceptions are from the guest. Anything that 746 * escalated to HV=1 from HV=0 is delivered via real mode handlers. 747 * 748 * 749 * We layout physical memory as follows: 750 * 0x0000 - 0x00ff : Secondary processor spin code 751 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors 752 * 0x1900 - 0x3fff : Real mode trampolines 753 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors 754 * 0x5900 - 0x6fff : Relon mode trampolines 755 * 0x7000 - 0x7fff : FWNMI data area 756 * 0x8000 - .... : Common interrupt handlers, remaining early 757 * setup code, rest of kernel. 758 * 759 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space 760 * is necessary. 
Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x4000)
OPEN_FIXED_SECTION(virt_vectors,	0x4000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first.
In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
	 * being used, so a nested NMI exception would corrupt it.
	 */
	ISET_RI=0
	ISTACK=0
	IRECONCILE=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	/* extract SRR1 wake-state bits 46:47 into r5, CR0 */
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	/* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
	__IKVM_REAL(system_reset)=0
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	/* Switch to the dedicated NMI emergency stack */
	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset
	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing. We do not want to reconcile because that goes
	 * through irq tracing which we don't want in NMI.
	 *
	 * Save PACAIRQHAPPENED to _DAR (otherwise unused), and set HARD_DIS
	 * as we are running with MSR[EE]=0.
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,_DAR(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,_DAR(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL

	GEN_KVM system_reset


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode.
It can be 983 * synchronous or asynchronous, caused by hardware or software, and it may be 984 * taken in a power-saving state. 985 * 986 * Handling: 987 * Similarly to system reset, this uses its own stack and PACA save area, 988 * the difference is re-entrancy is allowed on the machine check stack. 989 * 990 * machine_check_early is run in real mode, and carefully decodes the 991 * machine check and tries to handle it (e.g., flush the SLB if there was an 992 * error detected there), determines if it was recoverable and logs the 993 * event. 994 * 995 * This early code does not "reconcile" irq soft-mask state like SRESET or 996 * regular interrupts do, so irqs_disabled() among other things may not work 997 * properly (irq disable/enable already doesn't work because irq tracing can 998 * not work in real mode). 999 * 1000 * Then, depending on the execution context when the interrupt is taken, there 1001 * are 3 main actions: 1002 * - Executing in kernel mode. The event is queued with irq_work, which means 1003 * it is handled when it is next safe to do so (i.e., the kernel has enabled 1004 * interrupts), which could be immediately when the interrupt returns. This 1005 * avoids nasty issues like switching to virtual mode when the MMU is in a 1006 * bad state, or when executing OPAL code. (SRESET is exposed to such issues, 1007 * but it has different priorities). Check to see if the CPU was in power 1008 * save, and return via the wake up code if it was. 1009 * 1010 * - Executing in user mode. machine_check_exception is run like a normal 1011 * interrupt handler, which processes the data generated by the early handler. 1012 * 1013 * - Executing in guest mode. The interrupt is run with its KVM test, and 1014 * branches to KVM to deal with. KVM may queue the event for the host 1015 * to report later. 1016 * 1017 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear, 1018 * or SCRATCH0 is in use, it may cause a crash. 
1019 *
1020 * KVM:
1021 * See SRESET.
1022 */
1023INT_DEFINE_BEGIN(machine_check_early)
1024 IVEC=0x200
1025 IAREA=PACA_EXMC
1026 IVIRT=0 /* no virt entry point */
1027 IREALMODE_COMMON=1
1028 /*
1029 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
1030 * nested machine check corrupts it. machine_check_common enables
1031 * MSR_RI.
1032 */
1033 ISET_RI=0
1034 ISTACK=0
1035 IDAR=1
1036 IDSISR=1
1037 IRECONCILE=0
1038 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
1039INT_DEFINE_END(machine_check_early)
1040
1041INT_DEFINE_BEGIN(machine_check)
1042 IVEC=0x200
1043 IAREA=PACA_EXMC
1044 IVIRT=0 /* no virt entry point */
1045 ISET_RI=0
1046 IDAR=1
1047 IDSISR=1
1048 IKVM_SKIP=1
1049 IKVM_REAL=1
1050INT_DEFINE_END(machine_check)
1051
1052EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
1053 GEN_INT_ENTRY machine_check_early, virt=0
1054EXC_REAL_END(machine_check, 0x200, 0x100)
1055EXC_VIRT_NONE(0x4200, 0x100)
1056
1057#ifdef CONFIG_PPC_PSERIES
1058TRAMP_REAL_BEGIN(machine_check_fwnmi)
1059 /* See comment at machine_check exception, don't turn on RI */
1060 GEN_INT_ENTRY machine_check_early, virt=0
1061#endif
1062
1063#define MACHINE_CHECK_HANDLER_WINDUP \
1064 /* Clear MSR_RI before setting SRR0 and SRR1. */\
1065 li r9,0; \
1066 mtmsrd r9,1; /* Clear MSR_RI */ \
1067 /* Decrement paca->in_mce now RI is clear. */ \
1068 lhz r12,PACA_IN_MCE(r13); \
1069 subi r12,r12,1; \
1070 sth r12,PACA_IN_MCE(r13); \
1071 EXCEPTION_RESTORE_REGS
1072
1073EXC_COMMON_BEGIN(machine_check_early_common)
1074 __GEN_REALMODE_COMMON_ENTRY machine_check_early
1075
1076 /*
1077 * Switch to mc_emergency stack and handle re-entrancy (we limit
1078 * the nested MCE up to level 4 to avoid stack overflow).
1079 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
1080 *
1081 * We use paca->in_mce to check whether this is the first entry or
1082 * nested machine check. We increment paca->in_mce to track nested
1083 * machine checks.
1084 * 1085 * If this is the first entry then set stack pointer to 1086 * paca->mc_emergency_sp, otherwise r1 is already pointing to 1087 * stack frame on mc_emergency stack. 1088 * 1089 * NOTE: We are here with MSR_ME=0 (off), which means we risk a 1090 * checkstop if we get another machine check exception before we do 1091 * rfid with MSR_ME=1. 1092 * 1093 * This interrupt can wake directly from idle. If that is the case, 1094 * the machine check is handled then the idle wakeup code is called 1095 * to restore state. 1096 */ 1097 lhz r10,PACA_IN_MCE(r13) 1098 cmpwi r10,0 /* Are we in nested machine check */ 1099 cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */ 1100 addi r10,r10,1 /* increment paca->in_mce */ 1101 sth r10,PACA_IN_MCE(r13) 1102 1103 mr r10,r1 /* Save r1 */ 1104 bne 1f 1105 /* First machine check entry */ 1106 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ 11071: /* Limit nested MCE to level 4 to avoid stack overflow */ 1108 bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */ 1109 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1110 1111 __GEN_COMMON_BODY machine_check_early 1112 1113BEGIN_FTR_SECTION 1114 bl enable_machine_check 1115END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1116 li r10,MSR_RI 1117 mtmsrd r10,1 1118 1119 addi r3,r1,STACK_FRAME_OVERHEAD 1120 bl machine_check_early 1121 std r3,RESULT(r1) /* Save result */ 1122 ld r12,_MSR(r1) 1123 1124#ifdef CONFIG_PPC_P7_NAP 1125 /* 1126 * Check if thread was in power saving mode. We come here when any 1127 * of the following is true: 1128 * a. thread wasn't in power saving mode 1129 * b. thread was in power saving mode with no state loss, 1130 * supervisor state loss or hypervisor state loss. 1131 * 1132 * Go back to nap/sleep/winkle mode again if (b) is true. 1133 */ 1134BEGIN_FTR_SECTION 1135 rlwinm. 
r11,r12,47-31,30,31 1136 bne machine_check_idle_common 1137END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1138#endif 1139 1140#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1141 /* 1142 * Check if we are coming from guest. If yes, then run the normal 1143 * exception handler which will take the 1144 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event 1145 * to guest. 1146 */ 1147 lbz r11,HSTATE_IN_GUEST(r13) 1148 cmpwi r11,0 /* Check if coming from guest */ 1149 bne mce_deliver /* continue if we are. */ 1150#endif 1151 1152 /* 1153 * Check if we are coming from userspace. If yes, then run the normal 1154 * exception handler which will deliver the MC event to this kernel. 1155 */ 1156 andi. r11,r12,MSR_PR /* See if coming from user. */ 1157 bne mce_deliver /* continue in V mode if we are. */ 1158 1159 /* 1160 * At this point we are coming from kernel context. 1161 * Queue up the MCE event and return from the interrupt. 1162 * But before that, check if this is an un-recoverable exception. 1163 * If yes, then stay on emergency stack and panic. 1164 */ 1165 andi. r11,r12,MSR_RI 1166 beq unrecoverable_mce 1167 1168 /* 1169 * Check if we have successfully handled/recovered from error, if not 1170 * then stay on emergency stack and panic. 1171 */ 1172 ld r3,RESULT(r1) /* Load result */ 1173 cmpdi r3,0 /* see if we handled MCE successfully */ 1174 beq unrecoverable_mce /* if !handled then panic */ 1175 1176 /* 1177 * Return from MC interrupt. 1178 * Queue up the MCE event so that we can log it later, while 1179 * returning from kernel or opal call. 1180 */ 1181 bl machine_check_queue_event 1182 MACHINE_CHECK_HANDLER_WINDUP 1183 RFI_TO_KERNEL 1184 1185mce_deliver: 1186 /* 1187 * This is a host user or guest MCE. Restore all registers, then 1188 * run the "late" handler. For host user, this will run the 1189 * machine_check_exception handler in virtual mode like a normal 1190 * interrupt handler. 
For guest, this will trigger the KVM test 1191 * and branch to the KVM interrupt similarly to other interrupts. 1192 */ 1193BEGIN_FTR_SECTION 1194 ld r10,ORIG_GPR3(r1) 1195 mtspr SPRN_CFAR,r10 1196END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1197 MACHINE_CHECK_HANDLER_WINDUP 1198 GEN_INT_ENTRY machine_check, virt=0 1199 1200EXC_COMMON_BEGIN(machine_check_common) 1201 /* 1202 * Machine check is different because we use a different 1203 * save area: PACA_EXMC instead of PACA_EXGEN. 1204 */ 1205 GEN_COMMON machine_check 1206 1207 FINISH_NAP 1208 /* Enable MSR_RI when finished with PACA_EXMC */ 1209 li r10,MSR_RI 1210 mtmsrd r10,1 1211 addi r3,r1,STACK_FRAME_OVERHEAD 1212 bl machine_check_exception 1213 b interrupt_return 1214 1215 GEN_KVM machine_check 1216 1217 1218#ifdef CONFIG_PPC_P7_NAP 1219/* 1220 * This is an idle wakeup. Low level machine check has already been 1221 * done. Queue the event then call the idle code to do the wake up. 1222 */ 1223EXC_COMMON_BEGIN(machine_check_idle_common) 1224 bl machine_check_queue_event 1225 1226 /* 1227 * We have not used any non-volatile GPRs here, and as a rule 1228 * most exception code including machine check does not. 1229 * Therefore PACA_NAPSTATELOST does not need to be set. Idle 1230 * wakeup will restore volatile registers. 1231 * 1232 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce. 1233 * 1234 * Then decrement MCE nesting after finishing with the stack. 1235 */ 1236 ld r3,_MSR(r1) 1237 ld r4,_LINK(r1) 1238 1239 lhz r11,PACA_IN_MCE(r13) 1240 subi r11,r11,1 1241 sth r11,PACA_IN_MCE(r13) 1242 1243 mtlr r4 1244 rlwinm r10,r3,47-31,30,31 1245 cmpwi cr1,r10,2 1246 bltlr cr1 /* no state loss, return to idle caller */ 1247 b idle_return_gpr_loss 1248#endif 1249 1250EXC_COMMON_BEGIN(unrecoverable_mce) 1251 /* 1252 * We are going down. But there are chances that we might get hit by 1253 * another MCE during panic path and we may run into unstable state 1254 * with no way out. 
Hence, turn ME bit off while going down, so that
1255 * when another MCE is hit during panic path, system will checkstop
1256 * and hypervisor will get restarted cleanly by SP.
1257 */
1258BEGIN_FTR_SECTION
1259 li r10,0 /* clear MSR_RI */
1260 mtmsrd r10,1
1261 bl disable_machine_check
1262END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1263 ld r10,PACAKMSR(r13)
1264 li r3,MSR_ME
1265 andc r10,r10,r3
1266 mtmsrd r10
1267
1268 /* Invoke machine_check_exception to print MCE event and panic. */
1269 addi r3,r1,STACK_FRAME_OVERHEAD
1270 bl machine_check_exception
1271
1272 /*
1273 * We will not reach here. Even if we did, there is no way out.
1274 * Call unrecoverable_exception and die.
1275 */
1276 addi r3,r1,STACK_FRAME_OVERHEAD
1277 bl unrecoverable_exception
1278 b .
1279
1280
1281/**
1282 * Interrupt 0x300 - Data Storage Interrupt (DSI).
1283 * This is a synchronous interrupt generated due to a data access exception,
1284 * e.g., a load or store which does not have a valid page table entry with
1285 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
1286 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
1287 *
1288 * Handling:
1289 * - Hash MMU
1290 * Go to do_hash_page first to see if the HPT can be filled from an entry in
1291 * the Linux page table. Hash faults can hit in kernel mode in a fairly
1292 * arbitrary state (e.g., interrupts disabled, locks held) when accessing
1293 * "non-bolted" regions, e.g., vmalloc space. However these should always be
1294 * backed by Linux page tables.
1295 *
1296 * If none is found, do a Linux page fault. Linux page faults can happen in
1297 * kernel mode due to user copy operations of course.
1298 *
1299 * - Radix MMU
1300 * The hardware loads from the Linux page table directly, so a fault goes
1301 * immediately to Linux page fault.
1302 *
1303 * Conditions like DAWR match are handled on the way in to Linux page fault.
1304 */ 1305INT_DEFINE_BEGIN(data_access) 1306 IVEC=0x300 1307 IDAR=1 1308 IDSISR=1 1309#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1310 IKVM_SKIP=1 1311 IKVM_REAL=1 1312#endif 1313INT_DEFINE_END(data_access) 1314 1315EXC_REAL_BEGIN(data_access, 0x300, 0x80) 1316 GEN_INT_ENTRY data_access, virt=0 1317EXC_REAL_END(data_access, 0x300, 0x80) 1318EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) 1319 GEN_INT_ENTRY data_access, virt=1 1320EXC_VIRT_END(data_access, 0x4300, 0x80) 1321EXC_COMMON_BEGIN(data_access_common) 1322 GEN_COMMON data_access 1323 ld r4,_DAR(r1) 1324 ld r5,_DSISR(r1) 1325BEGIN_MMU_FTR_SECTION 1326 ld r6,_MSR(r1) 1327 li r3,0x300 1328 b do_hash_page /* Try to handle as hpte fault */ 1329MMU_FTR_SECTION_ELSE 1330 b handle_page_fault 1331ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1332 1333 GEN_KVM data_access 1334 1335 1336/** 1337 * Interrupt 0x380 - Data Segment Interrupt (DSLB). 1338 * This is a synchronous interrupt in response to an MMU fault missing SLB 1339 * entry for HPT, or an address outside RPT translation range. 1340 * 1341 * Handling: 1342 * - HPT: 1343 * This refills the SLB, or reports an access fault similarly to a bad page 1344 * fault. When coming from user-mode, the SLB handler may access any kernel 1345 * data, though it may itself take a DSLB. When coming from kernel mode, 1346 * recursive faults must be avoided so access is restricted to the kernel 1347 * image text/data, kernel stack, and any data allocated below 1348 * ppc64_bolted_size (first segment). The kernel handler must avoid stomping 1349 * on user-handler data structures. 1350 * 1351 * A dedicated save area EXSLB is used (XXX: but it actually need not be 1352 * these days, we could use EXGEN). 
1353 */ 1354INT_DEFINE_BEGIN(data_access_slb) 1355 IVEC=0x380 1356 IAREA=PACA_EXSLB 1357 IRECONCILE=0 1358 IDAR=1 1359#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1360 IKVM_SKIP=1 1361 IKVM_REAL=1 1362#endif 1363INT_DEFINE_END(data_access_slb) 1364 1365EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) 1366 GEN_INT_ENTRY data_access_slb, virt=0 1367EXC_REAL_END(data_access_slb, 0x380, 0x80) 1368EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) 1369 GEN_INT_ENTRY data_access_slb, virt=1 1370EXC_VIRT_END(data_access_slb, 0x4380, 0x80) 1371EXC_COMMON_BEGIN(data_access_slb_common) 1372 GEN_COMMON data_access_slb 1373 ld r4,_DAR(r1) 1374 addi r3,r1,STACK_FRAME_OVERHEAD 1375BEGIN_MMU_FTR_SECTION 1376 /* HPT case, do SLB fault */ 1377 bl do_slb_fault 1378 cmpdi r3,0 1379 bne- 1f 1380 b fast_interrupt_return 13811: /* Error case */ 1382MMU_FTR_SECTION_ELSE 1383 /* Radix case, access is outside page table range */ 1384 li r3,-EFAULT 1385ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1386 std r3,RESULT(r1) 1387 RECONCILE_IRQ_STATE(r10, r11) 1388 ld r4,_DAR(r1) 1389 ld r5,RESULT(r1) 1390 addi r3,r1,STACK_FRAME_OVERHEAD 1391 bl do_bad_slb_fault 1392 b interrupt_return 1393 1394 GEN_KVM data_access_slb 1395 1396 1397/** 1398 * Interrupt 0x400 - Instruction Storage Interrupt (ISI). 1399 * This is a synchronous interrupt in response to an MMU fault due to an 1400 * instruction fetch. 1401 * 1402 * Handling: 1403 * Similar to DSI, though in response to fetch. The faulting address is found 1404 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR). 
1405 */ 1406INT_DEFINE_BEGIN(instruction_access) 1407 IVEC=0x400 1408 IISIDE=1 1409 IDAR=1 1410 IDSISR=1 1411#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1412 IKVM_REAL=1 1413#endif 1414INT_DEFINE_END(instruction_access) 1415 1416EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) 1417 GEN_INT_ENTRY instruction_access, virt=0 1418EXC_REAL_END(instruction_access, 0x400, 0x80) 1419EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) 1420 GEN_INT_ENTRY instruction_access, virt=1 1421EXC_VIRT_END(instruction_access, 0x4400, 0x80) 1422EXC_COMMON_BEGIN(instruction_access_common) 1423 GEN_COMMON instruction_access 1424 ld r4,_DAR(r1) 1425 ld r5,_DSISR(r1) 1426BEGIN_MMU_FTR_SECTION 1427 ld r6,_MSR(r1) 1428 li r3,0x400 1429 b do_hash_page /* Try to handle as hpte fault */ 1430MMU_FTR_SECTION_ELSE 1431 b handle_page_fault 1432ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1433 1434 GEN_KVM instruction_access 1435 1436 1437/** 1438 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB). 1439 * This is a synchronous interrupt in response to an MMU fault due to an 1440 * instruction fetch. 1441 * 1442 * Handling: 1443 * Similar to DSLB, though in response to fetch. The faulting address is found 1444 * in SRR0 (rather than DAR). 
1445 */
1446INT_DEFINE_BEGIN(instruction_access_slb)
1447 IVEC=0x480
1448 IAREA=PACA_EXSLB
1449 IRECONCILE=0
1450 IISIDE=1
1451 IDAR=1
1452#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1453 IKVM_REAL=1
1454#endif
1455INT_DEFINE_END(instruction_access_slb)
1456
1457EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
1458 GEN_INT_ENTRY instruction_access_slb, virt=0
1459EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
1460EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
1461 GEN_INT_ENTRY instruction_access_slb, virt=1
1462EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
1463EXC_COMMON_BEGIN(instruction_access_slb_common)
1464 GEN_COMMON instruction_access_slb
1465 ld r4,_DAR(r1)
1466 addi r3,r1,STACK_FRAME_OVERHEAD
1467BEGIN_MMU_FTR_SECTION
1468 /* HPT case, do SLB fault */
1469 bl do_slb_fault
1470 cmpdi r3,0
1471 bne- 1f
1472 b fast_interrupt_return
14731: /* Error case */
1474MMU_FTR_SECTION_ELSE
1475 /* Radix case, access is outside page table range */
1476 li r3,-EFAULT
1477ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1478 std r3,RESULT(r1)
1479 RECONCILE_IRQ_STATE(r10, r11)
1480 ld r4,_DAR(r1)
1481 ld r5,RESULT(r1)
1482 addi r3,r1,STACK_FRAME_OVERHEAD
1483 bl do_bad_slb_fault
1484 b interrupt_return
1485
1486 GEN_KVM instruction_access_slb
1487
1488
1489/**
1490 * Interrupt 0x500 - External Interrupt.
1491 * This is an asynchronous maskable interrupt in response to an "external
1492 * exception" from the interrupt controller or hypervisor (e.g., device
1493 * interrupt). It is maskable in hardware by clearing MSR[EE], and
1494 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
1495 *
1496 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
1497 * interrupts are delivered with HSRR registers, guests use SRRs, which
1498 * requires IHSRR_IF_HVMODE.
1499 * 1500 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that 1501 * external interrupts are delivered as Hypervisor Virtualization Interrupts 1502 * rather than External Interrupts. 1503 * 1504 * Handling: 1505 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead, 1506 * because registers at the time of the interrupt are not so important as it is 1507 * asynchronous. 1508 * 1509 * If soft masked, the masked handler will note the pending interrupt for 1510 * replay, and clear MSR[EE] in the interrupted context. 1511 */ 1512INT_DEFINE_BEGIN(hardware_interrupt) 1513 IVEC=0x500 1514 IHSRR_IF_HVMODE=1 1515 IMASK=IRQS_DISABLED 1516 IKVM_REAL=1 1517 IKVM_VIRT=1 1518INT_DEFINE_END(hardware_interrupt) 1519 1520EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) 1521 GEN_INT_ENTRY hardware_interrupt, virt=0 1522EXC_REAL_END(hardware_interrupt, 0x500, 0x100) 1523EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) 1524 GEN_INT_ENTRY hardware_interrupt, virt=1 1525EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) 1526EXC_COMMON_BEGIN(hardware_interrupt_common) 1527 GEN_COMMON hardware_interrupt 1528 FINISH_NAP 1529 RUNLATCH_ON 1530 addi r3,r1,STACK_FRAME_OVERHEAD 1531 bl do_IRQ 1532 b interrupt_return 1533 1534 GEN_KVM hardware_interrupt 1535 1536 1537/** 1538 * Interrupt 0x600 - Alignment Interrupt 1539 * This is a synchronous interrupt in response to data alignment fault. 
1540 */ 1541INT_DEFINE_BEGIN(alignment) 1542 IVEC=0x600 1543 IDAR=1 1544 IDSISR=1 1545#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1546 IKVM_REAL=1 1547#endif 1548INT_DEFINE_END(alignment) 1549 1550EXC_REAL_BEGIN(alignment, 0x600, 0x100) 1551 GEN_INT_ENTRY alignment, virt=0 1552EXC_REAL_END(alignment, 0x600, 0x100) 1553EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) 1554 GEN_INT_ENTRY alignment, virt=1 1555EXC_VIRT_END(alignment, 0x4600, 0x100) 1556EXC_COMMON_BEGIN(alignment_common) 1557 GEN_COMMON alignment 1558 addi r3,r1,STACK_FRAME_OVERHEAD 1559 bl alignment_exception 1560 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1561 b interrupt_return 1562 1563 GEN_KVM alignment 1564 1565 1566/** 1567 * Interrupt 0x700 - Program Interrupt (program check). 1568 * This is a synchronous interrupt in response to various instruction faults: 1569 * traps, privilege errors, TM errors, floating point exceptions. 1570 * 1571 * Handling: 1572 * This interrupt may use the "emergency stack" in some cases when being taken 1573 * from kernel context, which complicates handling. 1574 */ 1575INT_DEFINE_BEGIN(program_check) 1576 IVEC=0x700 1577#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1578 IKVM_REAL=1 1579#endif 1580INT_DEFINE_END(program_check) 1581 1582EXC_REAL_BEGIN(program_check, 0x700, 0x100) 1583 GEN_INT_ENTRY program_check, virt=0 1584EXC_REAL_END(program_check, 0x700, 0x100) 1585EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) 1586 GEN_INT_ENTRY program_check, virt=1 1587EXC_VIRT_END(program_check, 0x4700, 0x100) 1588EXC_COMMON_BEGIN(program_check_common) 1589 __GEN_COMMON_ENTRY program_check 1590 1591 /* 1592 * It's possible to receive a TM Bad Thing type program check with 1593 * userspace register values (in particular r1), but with SRR1 reporting 1594 * that we came from the kernel. Normally that would confuse the bad 1595 * stack logic, and we would report a bad kernel stack pointer. Instead 1596 * we switch to the emergency stack if we're taking a TM Bad Thing from 1597 * the kernel. 
1598 */ 1599 1600 andi. r10,r12,MSR_PR 1601 bne 2f /* If userspace, go normal path */ 1602 1603 andis. r10,r12,(SRR1_PROGTM)@h 1604 bne 1f /* If TM, emergency */ 1605 1606 cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */ 1607 blt 2f /* normal path if not */ 1608 1609 /* Use the emergency stack */ 16101: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */ 1611 /* 3 in EXCEPTION_PROLOG_COMMON */ 1612 mr r10,r1 /* Save r1 */ 1613 ld r1,PACAEMERGSP(r13) /* Use emergency stack */ 1614 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1615 __ISTACK(program_check)=0 1616 __GEN_COMMON_BODY program_check 1617 b 3f 16182: 1619 __ISTACK(program_check)=1 1620 __GEN_COMMON_BODY program_check 16213: 1622 addi r3,r1,STACK_FRAME_OVERHEAD 1623 bl program_check_exception 1624 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1625 b interrupt_return 1626 1627 GEN_KVM program_check 1628 1629 1630/* 1631 * Interrupt 0x800 - Floating-Point Unavailable Interrupt. 1632 * This is a synchronous interrupt in response to executing an fp instruction 1633 * with MSR[FP]=0. 1634 * 1635 * Handling: 1636 * This will load FP registers and enable the FP bit if coming from userspace, 1637 * otherwise report a bad kernel use of FP. 
1638 */ 1639INT_DEFINE_BEGIN(fp_unavailable) 1640 IVEC=0x800 1641 IRECONCILE=0 1642#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1643 IKVM_REAL=1 1644#endif 1645INT_DEFINE_END(fp_unavailable) 1646 1647EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1648 GEN_INT_ENTRY fp_unavailable, virt=0 1649EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1650EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1651 GEN_INT_ENTRY fp_unavailable, virt=1 1652EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1653EXC_COMMON_BEGIN(fp_unavailable_common) 1654 GEN_COMMON fp_unavailable 1655 bne 1f /* if from user, just load it up */ 1656 RECONCILE_IRQ_STATE(r10, r11) 1657 addi r3,r1,STACK_FRAME_OVERHEAD 1658 bl kernel_fp_unavailable_exception 16590: trap 1660 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 16611: 1662#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1663BEGIN_FTR_SECTION 1664 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1665 * transaction), go do TM stuff 1666 */ 1667 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1668 bne- 2f 1669END_FTR_SECTION_IFSET(CPU_FTR_TM) 1670#endif 1671 bl load_up_fpu 1672 b fast_interrupt_return 1673#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 16742: /* User process was in a transaction */ 1675 RECONCILE_IRQ_STATE(r10, r11) 1676 addi r3,r1,STACK_FRAME_OVERHEAD 1677 bl fp_unavailable_tm 1678 b interrupt_return 1679#endif 1680 1681 GEN_KVM fp_unavailable 1682 1683 1684/** 1685 * Interrupt 0x900 - Decrementer Interrupt. 1686 * This is an asynchronous interrupt in response to a decrementer exception 1687 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1688 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1689 * local_irq_disable()). 1690 * 1691 * Handling: 1692 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1693 * 1694 * If soft masked, the masked handler will note the pending interrupt for 1695 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1696 * in the interrupted context. 
1697 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1698 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1699 * on the emergency stack. 1700 */ 1701INT_DEFINE_BEGIN(decrementer) 1702 IVEC=0x900 1703 IMASK=IRQS_DISABLED 1704#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1705 IKVM_REAL=1 1706#endif 1707INT_DEFINE_END(decrementer) 1708 1709EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1710 GEN_INT_ENTRY decrementer, virt=0 1711EXC_REAL_END(decrementer, 0x900, 0x80) 1712EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1713 GEN_INT_ENTRY decrementer, virt=1 1714EXC_VIRT_END(decrementer, 0x4900, 0x80) 1715EXC_COMMON_BEGIN(decrementer_common) 1716 GEN_COMMON decrementer 1717 FINISH_NAP 1718 RUNLATCH_ON 1719 addi r3,r1,STACK_FRAME_OVERHEAD 1720 bl timer_interrupt 1721 b interrupt_return 1722 1723 GEN_KVM decrementer 1724 1725 1726/** 1727 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1728 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1729 * register. 1730 * 1731 * Handling: 1732 * Linux does not use this outside KVM where it's used to keep a host timer 1733 * while the guest is given control of DEC. It should normally be caught by 1734 * the KVM test and routed there. 1735 */ 1736INT_DEFINE_BEGIN(hdecrementer) 1737 IVEC=0x980 1738 IHSRR=1 1739 ISTACK=0 1740 IRECONCILE=0 1741 IKVM_REAL=1 1742 IKVM_VIRT=1 1743INT_DEFINE_END(hdecrementer) 1744 1745EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1746 GEN_INT_ENTRY hdecrementer, virt=0 1747EXC_REAL_END(hdecrementer, 0x980, 0x80) 1748EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1749 GEN_INT_ENTRY hdecrementer, virt=1 1750EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1751EXC_COMMON_BEGIN(hdecrementer_common) 1752 __GEN_COMMON_ENTRY hdecrementer 1753 /* 1754 * Hypervisor decrementer interrupts not caught by the KVM test 1755 * shouldn't occur but are sometimes left pending on exit from a KVM 1756 * guest. 
We don't need to do anything to clear them, as they are 1757 * edge-triggered. 1758 * 1759 * Be careful to avoid touching the kernel stack. 1760 */ 1761 ld r10,PACA_EXGEN+EX_CTR(r13) 1762 mtctr r10 1763 mtcrf 0x80,r9 1764 ld r9,PACA_EXGEN+EX_R9(r13) 1765 ld r10,PACA_EXGEN+EX_R10(r13) 1766 ld r11,PACA_EXGEN+EX_R11(r13) 1767 ld r12,PACA_EXGEN+EX_R12(r13) 1768 ld r13,PACA_EXGEN+EX_R13(r13) 1769 HRFI_TO_KERNEL 1770 1771 GEN_KVM hdecrementer 1772 1773 1774/** 1775 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1776 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1777 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1778 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1779 * 1780 * Handling: 1781 * Guests may use this for IPIs between threads in a core if the 1782 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1783 * 1784 * If soft masked, the masked handler will note the pending interrupt for 1785 * replay, leaving MSR[EE] enabled in the interrupted context because the 1786 * doorbells are edge triggered. 
1787 */ 1788INT_DEFINE_BEGIN(doorbell_super) 1789 IVEC=0xa00 1790 IMASK=IRQS_DISABLED 1791#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1792 IKVM_REAL=1 1793#endif 1794INT_DEFINE_END(doorbell_super) 1795 1796EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1797 GEN_INT_ENTRY doorbell_super, virt=0 1798EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1799EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1800 GEN_INT_ENTRY doorbell_super, virt=1 1801EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1802EXC_COMMON_BEGIN(doorbell_super_common) 1803 GEN_COMMON doorbell_super 1804 FINISH_NAP 1805 RUNLATCH_ON 1806 addi r3,r1,STACK_FRAME_OVERHEAD 1807#ifdef CONFIG_PPC_DOORBELL 1808 bl doorbell_exception 1809#else 1810 bl unknown_exception 1811#endif 1812 b interrupt_return 1813 1814 GEN_KVM doorbell_super 1815 1816 1817EXC_REAL_NONE(0xb00, 0x100) 1818EXC_VIRT_NONE(0x4b00, 0x100) 1819 1820/** 1821 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1822 * This is a synchronous interrupt invoked with the "sc" instruction. The 1823 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1824 * is directed to the currently running OS. The hypercall is invoked with 1825 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1826 * 1827 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1828 * 0x4c00 virtual mode. 1829 * 1830 * Handling: 1831 * If the KVM test fires then it was due to a hypercall and is accordingly 1832 * routed to KVM. Otherwise this executes a normal Linux system call. 1833 * 1834 * Call convention: 1835 * 1836 * syscall and hypercalls register conventions are documented in 1837 * Documentation/powerpc/syscall64-abi.rst and 1838 * Documentation/powerpc/papr_hcalls.rst respectively. 1839 * 1840 * The intersection of volatile registers that don't contain possible 1841 * inputs is: cr0, xer, ctr. 
We may use these as scratch regs upon entry 1842 * without saving, though xer is not a good idea to use, as hardware may 1843 * interpret some bits so it may be costly to change them. 1844 */ 1845INT_DEFINE_BEGIN(system_call) 1846 IVEC=0xc00 1847 IKVM_REAL=1 1848 IKVM_VIRT=1 1849INT_DEFINE_END(system_call) 1850 1851.macro SYSTEM_CALL virt 1852#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1853 /* 1854 * There is a little bit of juggling to get syscall and hcall 1855 * working well. Save r13 in ctr to avoid using SPRG scratch 1856 * register. 1857 * 1858 * Userspace syscalls have already saved the PPR, hcalls must save 1859 * it before setting HMT_MEDIUM. 1860 */ 1861 mtctr r13 1862 GET_PACA(r13) 1863 std r10,PACA_EXGEN+EX_R10(r13) 1864 INTERRUPT_TO_KERNEL 1865 KVMTEST system_call /* uses r10, branch to system_call_kvm */ 1866 mfctr r9 1867#else 1868 mr r9,r13 1869 GET_PACA(r13) 1870 INTERRUPT_TO_KERNEL 1871#endif 1872 1873#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1874BEGIN_FTR_SECTION 1875 cmpdi r0,0x1ebe 1876 beq- 1f 1877END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 1878#endif 1879 1880 /* We reach here with PACA in r13, r13 in r9. */ 1881 mfspr r11,SPRN_SRR0 1882 mfspr r12,SPRN_SRR1 1883 1884 HMT_MEDIUM 1885 1886 .if ! \virt 1887 __LOAD_HANDLER(r10, system_call_common) 1888 mtspr SPRN_SRR0,r10 1889 ld r10,PACAKMSR(r13) 1890 mtspr SPRN_SRR1,r10 1891 RFI_TO_KERNEL 1892 b . /* prevent speculative execution */ 1893 .else 1894 li r10,MSR_RI 1895 mtmsrd r10,1 /* Set RI (EE=0) */ 1896#ifdef CONFIG_RELOCATABLE 1897 __LOAD_HANDLER(r10, system_call_common) 1898 mtctr r10 1899 bctr 1900#else 1901 b system_call_common 1902#endif 1903 .endif 1904 1905#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1906 /* Fast LE/BE switch system call */ 19071: mfspr r12,SPRN_SRR1 1908 xori r12,r12,MSR_LE 1909 mtspr SPRN_SRR1,r12 1910 mr r13,r9 1911 RFI_TO_USER /* return to userspace */ 1912 b . 
/* prevent speculative execution */
1913#endif
1914.endm
1915
1916EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1917 SYSTEM_CALL 0
1918EXC_REAL_END(system_call, 0xc00, 0x100)
1919EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1920 SYSTEM_CALL 1
1921EXC_VIRT_END(system_call, 0x4c00, 0x100)
1922
1923#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1924TRAMP_REAL_BEGIN(system_call_kvm)
1925 /*
1926 * This is a hcall, so register convention is as above, with these
1927 * differences:
1928 * r13 = PACA
1929 * ctr = orig r13
1930 * orig r10 saved in PACA
1931 */
1932 /*
1933 * Save the PPR (on systems that support it) before changing to
1934 * HMT_MEDIUM. That allows the KVM code to save that value into the
1935 * guest state (it is the guest's PPR value).
1936 */
1937BEGIN_FTR_SECTION
1938 mfspr r10,SPRN_PPR
1939 std r10,HSTATE_PPR(r13)
1940END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1941 HMT_MEDIUM
1942 mfctr r10
1943 SET_SCRATCH0(r10)
1944 mfcr r10
1945 std r12,HSTATE_SCRATCH0(r13)
1946 sldi r12,r10,32
1947 ori r12,r12,0xc00
1948#ifdef CONFIG_RELOCATABLE
1949 /*
1950 * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
1951 * outside the head section.
1952 */
1953 __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
1954 mtctr r10
1955 ld r10,PACA_EXGEN+EX_R10(r13)
1956 bctr
1957#else
1958 ld r10,PACA_EXGEN+EX_R10(r13)
1959 b kvmppc_interrupt
1960#endif
1961#endif
1962
1963
1964/**
1965 * Interrupt 0xd00 - Trace Interrupt.
1966 * This is a synchronous interrupt in response to instruction step or
1967 * breakpoint faults.
1968 */ 1969INT_DEFINE_BEGIN(single_step) 1970 IVEC=0xd00 1971#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1972 IKVM_REAL=1 1973#endif 1974INT_DEFINE_END(single_step) 1975 1976EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 1977 GEN_INT_ENTRY single_step, virt=0 1978EXC_REAL_END(single_step, 0xd00, 0x100) 1979EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 1980 GEN_INT_ENTRY single_step, virt=1 1981EXC_VIRT_END(single_step, 0x4d00, 0x100) 1982EXC_COMMON_BEGIN(single_step_common) 1983 GEN_COMMON single_step 1984 addi r3,r1,STACK_FRAME_OVERHEAD 1985 bl single_step_exception 1986 b interrupt_return 1987 1988 GEN_KVM single_step 1989 1990 1991/** 1992 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 1993 * This is a synchronous interrupt in response to an MMU fault caused by a 1994 * guest data access. 1995 * 1996 * Handling: 1997 * This should always get routed to KVM. In radix MMU mode, this is caused 1998 * by a guest nested radix access that can't be performed due to the 1999 * partition scope page table. In hash mode, this can be caused by guests 2000 * running with translation disabled (virtual real mode) or with VPM enabled. 2001 * KVM will update the page table structures or disallow the access. 
2002 */ 2003INT_DEFINE_BEGIN(h_data_storage) 2004 IVEC=0xe00 2005 IHSRR=1 2006 IDAR=1 2007 IDSISR=1 2008 IKVM_SKIP=1 2009 IKVM_REAL=1 2010 IKVM_VIRT=1 2011INT_DEFINE_END(h_data_storage) 2012 2013EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2014 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2015EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2016EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2017 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2018EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2019EXC_COMMON_BEGIN(h_data_storage_common) 2020 GEN_COMMON h_data_storage 2021 addi r3,r1,STACK_FRAME_OVERHEAD 2022BEGIN_MMU_FTR_SECTION 2023 ld r4,_DAR(r1) 2024 li r5,SIGSEGV 2025 bl bad_page_fault 2026MMU_FTR_SECTION_ELSE 2027 bl unknown_exception 2028ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2029 b interrupt_return 2030 2031 GEN_KVM h_data_storage 2032 2033 2034/** 2035 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2036 * This is a synchronous interrupt in response to an MMU fault caused by a 2037 * guest instruction fetch, similar to HDSI. 2038 */ 2039INT_DEFINE_BEGIN(h_instr_storage) 2040 IVEC=0xe20 2041 IHSRR=1 2042 IKVM_REAL=1 2043 IKVM_VIRT=1 2044INT_DEFINE_END(h_instr_storage) 2045 2046EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2047 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2048EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2049EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2050 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2051EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2052EXC_COMMON_BEGIN(h_instr_storage_common) 2053 GEN_COMMON h_instr_storage 2054 addi r3,r1,STACK_FRAME_OVERHEAD 2055 bl unknown_exception 2056 b interrupt_return 2057 2058 GEN_KVM h_instr_storage 2059 2060 2061/** 2062 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 
2063 */ 2064INT_DEFINE_BEGIN(emulation_assist) 2065 IVEC=0xe40 2066 IHSRR=1 2067 IKVM_REAL=1 2068 IKVM_VIRT=1 2069INT_DEFINE_END(emulation_assist) 2070 2071EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20) 2072 GEN_INT_ENTRY emulation_assist, virt=0, ool=1 2073EXC_REAL_END(emulation_assist, 0xe40, 0x20) 2074EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20) 2075 GEN_INT_ENTRY emulation_assist, virt=1, ool=1 2076EXC_VIRT_END(emulation_assist, 0x4e40, 0x20) 2077EXC_COMMON_BEGIN(emulation_assist_common) 2078 GEN_COMMON emulation_assist 2079 addi r3,r1,STACK_FRAME_OVERHEAD 2080 bl emulation_assist_interrupt 2081 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2082 b interrupt_return 2083 2084 GEN_KVM emulation_assist 2085 2086 2087/** 2088 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI). 2089 * This is an asynchronous interrupt caused by a Hypervisor Maintenance 2090 * Exception. It is always taken in real mode but uses HSRR registers 2091 * unlike SRESET and MCE. 2092 * 2093 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable 2094 * with IRQS_DISABLED mask (i.e., local_irq_disable()). 2095 * 2096 * Handling: 2097 * This is a special case, this is handled similarly to machine checks, with an 2098 * initial real mode handler that is not soft-masked, which attempts to fix the 2099 * problem. Then a regular handler which is soft-maskable and reports the 2100 * problem. 2101 * 2102 * The emergency stack is used for the early real mode handler. 2103 * 2104 * XXX: unclear why MCE and HMI schemes could not be made common, e.g., 2105 * either use soft-masking for the MCE, or use irq_work for the HMI. 2106 * 2107 * KVM: 2108 * Unlike MCE, this calls into KVM without calling the real mode handler 2109 * first. 
2110 */ 2111INT_DEFINE_BEGIN(hmi_exception_early) 2112 IVEC=0xe60 2113 IHSRR=1 2114 IREALMODE_COMMON=1 2115 ISTACK=0 2116 IRECONCILE=0 2117 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 2118 IKVM_REAL=1 2119INT_DEFINE_END(hmi_exception_early) 2120 2121INT_DEFINE_BEGIN(hmi_exception) 2122 IVEC=0xe60 2123 IHSRR=1 2124 IMASK=IRQS_DISABLED 2125 IKVM_REAL=1 2126INT_DEFINE_END(hmi_exception) 2127 2128EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) 2129 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1 2130EXC_REAL_END(hmi_exception, 0xe60, 0x20) 2131EXC_VIRT_NONE(0x4e60, 0x20) 2132 2133EXC_COMMON_BEGIN(hmi_exception_early_common) 2134 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early 2135 2136 mr r10,r1 /* Save r1 */ 2137 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ 2138 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 2139 2140 __GEN_COMMON_BODY hmi_exception_early 2141 2142 addi r3,r1,STACK_FRAME_OVERHEAD 2143 bl hmi_exception_realmode 2144 cmpdi cr0,r3,0 2145 bne 1f 2146 2147 EXCEPTION_RESTORE_REGS hsrr=1 2148 HRFI_TO_USER_OR_KERNEL 2149 21501: 2151 /* 2152 * Go to virtual mode and pull the HMI event information from 2153 * firmware. 2154 */ 2155 EXCEPTION_RESTORE_REGS hsrr=1 2156 GEN_INT_ENTRY hmi_exception, virt=0 2157 2158 GEN_KVM hmi_exception_early 2159 2160EXC_COMMON_BEGIN(hmi_exception_common) 2161 GEN_COMMON hmi_exception 2162 FINISH_NAP 2163 RUNLATCH_ON 2164 addi r3,r1,STACK_FRAME_OVERHEAD 2165 bl handle_hmi_exception 2166 b interrupt_return 2167 2168 GEN_KVM hmi_exception 2169 2170 2171/** 2172 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt. 2173 * This is an asynchronous interrupt in response to a msgsnd doorbell. 2174 * Similar to the 0xa00 doorbell but for host rather than guest. 
2175 */ 2176INT_DEFINE_BEGIN(h_doorbell) 2177 IVEC=0xe80 2178 IHSRR=1 2179 IMASK=IRQS_DISABLED 2180 IKVM_REAL=1 2181 IKVM_VIRT=1 2182INT_DEFINE_END(h_doorbell) 2183 2184EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) 2185 GEN_INT_ENTRY h_doorbell, virt=0, ool=1 2186EXC_REAL_END(h_doorbell, 0xe80, 0x20) 2187EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) 2188 GEN_INT_ENTRY h_doorbell, virt=1, ool=1 2189EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) 2190EXC_COMMON_BEGIN(h_doorbell_common) 2191 GEN_COMMON h_doorbell 2192 FINISH_NAP 2193 RUNLATCH_ON 2194 addi r3,r1,STACK_FRAME_OVERHEAD 2195#ifdef CONFIG_PPC_DOORBELL 2196 bl doorbell_exception 2197#else 2198 bl unknown_exception 2199#endif 2200 b interrupt_return 2201 2202 GEN_KVM h_doorbell 2203 2204 2205/** 2206 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt. 2207 * This is an asynchronous interrupt in response to an "external exception". 2208 * Similar to 0x500 but for host only. 2209 */ 2210INT_DEFINE_BEGIN(h_virt_irq) 2211 IVEC=0xea0 2212 IHSRR=1 2213 IMASK=IRQS_DISABLED 2214 IKVM_REAL=1 2215 IKVM_VIRT=1 2216INT_DEFINE_END(h_virt_irq) 2217 2218EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20) 2219 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1 2220EXC_REAL_END(h_virt_irq, 0xea0, 0x20) 2221EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) 2222 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1 2223EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) 2224EXC_COMMON_BEGIN(h_virt_irq_common) 2225 GEN_COMMON h_virt_irq 2226 FINISH_NAP 2227 RUNLATCH_ON 2228 addi r3,r1,STACK_FRAME_OVERHEAD 2229 bl do_IRQ 2230 b interrupt_return 2231 2232 GEN_KVM h_virt_irq 2233 2234 2235EXC_REAL_NONE(0xec0, 0x20) 2236EXC_VIRT_NONE(0x4ec0, 0x20) 2237EXC_REAL_NONE(0xee0, 0x20) 2238EXC_VIRT_NONE(0x4ee0, 0x20) 2239 2240 2241/* 2242 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU). 2243 * This is an asynchronous interrupt in response to a PMU exception. 2244 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 2245 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()). 
2246 * 2247 * Handling: 2248 * This calls into the perf subsystem. 2249 * 2250 * Like the watchdog soft-nmi, it appears an NMI interrupt to Linux, in that it 2251 * runs under local_irq_disable. However it may be soft-masked in 2252 * powerpc-specific code. 2253 * 2254 * If soft masked, the masked handler will note the pending interrupt for 2255 * replay, and clear MSR[EE] in the interrupted context. 2256 */ 2257INT_DEFINE_BEGIN(performance_monitor) 2258 IVEC=0xf00 2259 IMASK=IRQS_PMI_DISABLED 2260#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2261 IKVM_REAL=1 2262#endif 2263INT_DEFINE_END(performance_monitor) 2264 2265EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20) 2266 GEN_INT_ENTRY performance_monitor, virt=0, ool=1 2267EXC_REAL_END(performance_monitor, 0xf00, 0x20) 2268EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) 2269 GEN_INT_ENTRY performance_monitor, virt=1, ool=1 2270EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) 2271EXC_COMMON_BEGIN(performance_monitor_common) 2272 GEN_COMMON performance_monitor 2273 FINISH_NAP 2274 RUNLATCH_ON 2275 addi r3,r1,STACK_FRAME_OVERHEAD 2276 bl performance_monitor_exception 2277 b interrupt_return 2278 2279 GEN_KVM performance_monitor 2280 2281 2282/** 2283 * Interrupt 0xf20 - Vector Unavailable Interrupt. 2284 * This is a synchronous interrupt in response to 2285 * executing a vector (or altivec) instruction with MSR[VEC]=0. 2286 * Similar to FP unavailable. 
2287 */ 2288INT_DEFINE_BEGIN(altivec_unavailable) 2289 IVEC=0xf20 2290 IRECONCILE=0 2291#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2292 IKVM_REAL=1 2293#endif 2294INT_DEFINE_END(altivec_unavailable) 2295 2296EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2297 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2298EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2299EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2300 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2301EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2302EXC_COMMON_BEGIN(altivec_unavailable_common) 2303 GEN_COMMON altivec_unavailable 2304#ifdef CONFIG_ALTIVEC 2305BEGIN_FTR_SECTION 2306 beq 1f 2307#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2308 BEGIN_FTR_SECTION_NESTED(69) 2309 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2310 * transaction), go do TM stuff 2311 */ 2312 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2313 bne- 2f 2314 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2315#endif 2316 bl load_up_altivec 2317 b fast_interrupt_return 2318#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23192: /* User process was in a transaction */ 2320 RECONCILE_IRQ_STATE(r10, r11) 2321 addi r3,r1,STACK_FRAME_OVERHEAD 2322 bl altivec_unavailable_tm 2323 b interrupt_return 2324#endif 23251: 2326END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2327#endif 2328 RECONCILE_IRQ_STATE(r10, r11) 2329 addi r3,r1,STACK_FRAME_OVERHEAD 2330 bl altivec_unavailable_exception 2331 b interrupt_return 2332 2333 GEN_KVM altivec_unavailable 2334 2335 2336/** 2337 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2338 * This is a synchronous interrupt in response to 2339 * executing a VSX instruction with MSR[VSX]=0. 2340 * Similar to FP unavailable. 
2341 */ 2342INT_DEFINE_BEGIN(vsx_unavailable) 2343 IVEC=0xf40 2344 IRECONCILE=0 2345#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2346 IKVM_REAL=1 2347#endif 2348INT_DEFINE_END(vsx_unavailable) 2349 2350EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2351 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2352EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2353EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2354 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2355EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2356EXC_COMMON_BEGIN(vsx_unavailable_common) 2357 GEN_COMMON vsx_unavailable 2358#ifdef CONFIG_VSX 2359BEGIN_FTR_SECTION 2360 beq 1f 2361#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2362 BEGIN_FTR_SECTION_NESTED(69) 2363 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2364 * transaction), go do TM stuff 2365 */ 2366 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2367 bne- 2f 2368 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2369#endif 2370 b load_up_vsx 2371#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23722: /* User process was in a transaction */ 2373 RECONCILE_IRQ_STATE(r10, r11) 2374 addi r3,r1,STACK_FRAME_OVERHEAD 2375 bl vsx_unavailable_tm 2376 b interrupt_return 2377#endif 23781: 2379END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2380#endif 2381 RECONCILE_IRQ_STATE(r10, r11) 2382 addi r3,r1,STACK_FRAME_OVERHEAD 2383 bl vsx_unavailable_exception 2384 b interrupt_return 2385 2386 GEN_KVM vsx_unavailable 2387 2388 2389/** 2390 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2391 * This is a synchronous interrupt in response to 2392 * executing an instruction without access to the facility that can be 2393 * resolved by the OS (e.g., FSCR, MSR). 2394 * Similar to FP unavailable. 
2395 */ 2396INT_DEFINE_BEGIN(facility_unavailable) 2397 IVEC=0xf60 2398#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2399 IKVM_REAL=1 2400#endif 2401INT_DEFINE_END(facility_unavailable) 2402 2403EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20) 2404 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1 2405EXC_REAL_END(facility_unavailable, 0xf60, 0x20) 2406EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20) 2407 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1 2408EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20) 2409EXC_COMMON_BEGIN(facility_unavailable_common) 2410 GEN_COMMON facility_unavailable 2411 addi r3,r1,STACK_FRAME_OVERHEAD 2412 bl facility_unavailable_exception 2413 b interrupt_return 2414 2415 GEN_KVM facility_unavailable 2416 2417 2418/** 2419 * Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt. 2420 * This is a synchronous interrupt in response to 2421 * executing an instruction without access to the facility that can only 2422 * be resolved in HV mode (e.g., HFSCR). 2423 * Similar to FP unavailable. 
2424 */ 2425INT_DEFINE_BEGIN(h_facility_unavailable) 2426 IVEC=0xf80 2427 IHSRR=1 2428 IKVM_REAL=1 2429 IKVM_VIRT=1 2430INT_DEFINE_END(h_facility_unavailable) 2431 2432EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20) 2433 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1 2434EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20) 2435EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20) 2436 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1 2437EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20) 2438EXC_COMMON_BEGIN(h_facility_unavailable_common) 2439 GEN_COMMON h_facility_unavailable 2440 addi r3,r1,STACK_FRAME_OVERHEAD 2441 bl facility_unavailable_exception 2442 b interrupt_return 2443 2444 GEN_KVM h_facility_unavailable 2445 2446 2447EXC_REAL_NONE(0xfa0, 0x20) 2448EXC_VIRT_NONE(0x4fa0, 0x20) 2449EXC_REAL_NONE(0xfc0, 0x20) 2450EXC_VIRT_NONE(0x4fc0, 0x20) 2451EXC_REAL_NONE(0xfe0, 0x20) 2452EXC_VIRT_NONE(0x4fe0, 0x20) 2453 2454EXC_REAL_NONE(0x1000, 0x100) 2455EXC_VIRT_NONE(0x5000, 0x100) 2456EXC_REAL_NONE(0x1100, 0x100) 2457EXC_VIRT_NONE(0x5100, 0x100) 2458 2459#ifdef CONFIG_CBE_RAS 2460INT_DEFINE_BEGIN(cbe_system_error) 2461 IVEC=0x1200 2462 IHSRR=1 2463 IKVM_SKIP=1 2464 IKVM_REAL=1 2465INT_DEFINE_END(cbe_system_error) 2466 2467EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) 2468 GEN_INT_ENTRY cbe_system_error, virt=0 2469EXC_REAL_END(cbe_system_error, 0x1200, 0x100) 2470EXC_VIRT_NONE(0x5200, 0x100) 2471EXC_COMMON_BEGIN(cbe_system_error_common) 2472 GEN_COMMON cbe_system_error 2473 addi r3,r1,STACK_FRAME_OVERHEAD 2474 bl cbe_system_error_exception 2475 b interrupt_return 2476 2477 GEN_KVM cbe_system_error 2478 2479#else /* CONFIG_CBE_RAS */ 2480EXC_REAL_NONE(0x1200, 0x100) 2481EXC_VIRT_NONE(0x5200, 0x100) 2482#endif 2483 2484 2485INT_DEFINE_BEGIN(instruction_breakpoint) 2486 IVEC=0x1300 2487#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2488 IKVM_SKIP=1 2489 IKVM_REAL=1 2490#endif 2491INT_DEFINE_END(instruction_breakpoint) 2492 2493EXC_REAL_BEGIN(instruction_breakpoint, 
0x1300, 0x100) 2494 GEN_INT_ENTRY instruction_breakpoint, virt=0 2495EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) 2496EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) 2497 GEN_INT_ENTRY instruction_breakpoint, virt=1 2498EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) 2499EXC_COMMON_BEGIN(instruction_breakpoint_common) 2500 GEN_COMMON instruction_breakpoint 2501 addi r3,r1,STACK_FRAME_OVERHEAD 2502 bl instruction_breakpoint_exception 2503 b interrupt_return 2504 2505 GEN_KVM instruction_breakpoint 2506 2507 2508EXC_REAL_NONE(0x1400, 0x100) 2509EXC_VIRT_NONE(0x5400, 0x100) 2510 2511/** 2512 * Interrupt 0x1500 - Soft Patch Interrupt 2513 * 2514 * Handling: 2515 * This is an implementation specific interrupt which can be used for a 2516 * range of exceptions. 2517 * 2518 * This interrupt handler is unique in that it runs the denormal assist 2519 * code even for guests (and even in guest context) without going to KVM, 2520 * for speed. POWER9 does not raise denorm exceptions, so this special case 2521 * could be phased out in future to reduce special cases. 2522 */ 2523INT_DEFINE_BEGIN(denorm_exception) 2524 IVEC=0x1500 2525 IHSRR=1 2526 IBRANCH_COMMON=0 2527 IKVM_REAL=1 2528INT_DEFINE_END(denorm_exception) 2529 2530EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2531 GEN_INT_ENTRY denorm_exception, virt=0 2532#ifdef CONFIG_PPC_DENORMALISATION 2533 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2534 bne+ denorm_assist 2535#endif 2536 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2537EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2538#ifdef CONFIG_PPC_DENORMALISATION 2539EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2540 GEN_INT_ENTRY denorm_exception, virt=1 2541 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? 
*/ 2542 bne+ denorm_assist 2543 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2544EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2545#else 2546EXC_VIRT_NONE(0x5500, 0x100) 2547#endif 2548 2549#ifdef CONFIG_PPC_DENORMALISATION 2550TRAMP_REAL_BEGIN(denorm_assist) 2551BEGIN_FTR_SECTION 2552/* 2553 * To denormalise we need to move a copy of the register to itself. 2554 * For POWER6 do that here for all FP regs. 2555 */ 2556 mfmsr r10 2557 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2558 xori r10,r10,(MSR_FE0|MSR_FE1) 2559 mtmsrd r10 2560 sync 2561 2562 .Lreg=0 2563 .rept 32 2564 fmr .Lreg,.Lreg 2565 .Lreg=.Lreg+1 2566 .endr 2567 2568FTR_SECTION_ELSE 2569/* 2570 * To denormalise we need to move a copy of the register to itself. 2571 * For POWER7 do that here for the first 32 VSX registers only. 2572 */ 2573 mfmsr r10 2574 oris r10,r10,MSR_VSX@h 2575 mtmsrd r10 2576 sync 2577 2578 .Lreg=0 2579 .rept 32 2580 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2581 .Lreg=.Lreg+1 2582 .endr 2583 2584ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2585 2586BEGIN_FTR_SECTION 2587 b denorm_done 2588END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2589/* 2590 * To denormalise we need to move a copy of the register to itself. 2591 * For POWER8 we need to do that for all 64 VSX registers 2592 */ 2593 .Lreg=32 2594 .rept 32 2595 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2596 .Lreg=.Lreg+1 2597 .endr 2598 2599denorm_done: 2600 mfspr r11,SPRN_HSRR0 2601 subi r11,r11,4 2602 mtspr SPRN_HSRR0,r11 2603 mtcrf 0x80,r9 2604 ld r9,PACA_EXGEN+EX_R9(r13) 2605BEGIN_FTR_SECTION 2606 ld r10,PACA_EXGEN+EX_PPR(r13) 2607 mtspr SPRN_PPR,r10 2608END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2609BEGIN_FTR_SECTION 2610 ld r10,PACA_EXGEN+EX_CFAR(r13) 2611 mtspr SPRN_CFAR,r10 2612END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2613 ld r10,PACA_EXGEN+EX_R10(r13) 2614 ld r11,PACA_EXGEN+EX_R11(r13) 2615 ld r12,PACA_EXGEN+EX_R12(r13) 2616 ld r13,PACA_EXGEN+EX_R13(r13) 2617 HRFI_TO_UNKNOWN 2618 b . 
#endif

EXC_COMMON_BEGIN(denorm_exception_common)
	GEN_COMMON denorm_exception
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return

	GEN_KVM denorm_exception


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
	IVEC=0x1600
	IHSRR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(cbe_maintenance)

EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
	GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
	GEN_COMMON cbe_maintenance
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_maintenance_exception
	b	interrupt_return

	GEN_KVM cbe_maintenance

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


INT_DEFINE_BEGIN(altivec_assist)
	IVEC=0x1700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_assist)

EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
	GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
	GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
	GEN_COMMON altivec_assist
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
	bl	altivec_assist_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
	bl	unknown_exception
#endif
	b	interrupt_return

	GEN_KVM altivec_assist


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
	IVEC=0x1800
	IHSRR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(cbe_thermal)

EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
	GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
	GEN_COMMON cbe_thermal
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_thermal_exception
	b	interrupt_return

	GEN_KVM cbe_thermal

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif


#ifdef CONFIG_PPC_WATCHDOG

INT_DEFINE_BEGIN(soft_nmi)
	IVEC=0x900
	ISTACK=0
	IRECONCILE=0	/* Soft-NMI may fire under local_irq_disable */
INT_DEFINE_END(soft_nmi)

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mfspr	r11,SPRN_SRR0
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY soft_nmi

	/*
	 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
	 * system_reset_common)
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,_DAR(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	soft_nmi_interrupt

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * Restore soft mask settings.
2753 */ 2754 ld r10,_DAR(r1) 2755 stb r10,PACAIRQHAPPENED(r13) 2756 ld r10,SOFTE(r1) 2757 stb r10,PACAIRQSOFTMASK(r13) 2758 2759 kuap_restore_amr r10 2760 EXCEPTION_RESTORE_REGS hsrr=0 2761 RFI_TO_KERNEL 2762 2763#endif /* CONFIG_PPC_WATCHDOG */ 2764 2765/* 2766 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2767 * - If it was a decrementer interrupt, we bump the dec to max and and return. 2768 * - If it was a doorbell we return immediately since doorbells are edge 2769 * triggered and won't automatically refire. 2770 * - If it was a HMI we return immediately since we handled it in realmode 2771 * and it won't refire. 2772 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2773 * This is called with r10 containing the value to OR to the paca field. 2774 */ 2775.macro MASKED_INTERRUPT hsrr=0 2776 .if \hsrr 2777masked_Hinterrupt: 2778 .else 2779masked_interrupt: 2780 .endif 2781 lbz r11,PACAIRQHAPPENED(r13) 2782 or r11,r11,r10 2783 stb r11,PACAIRQHAPPENED(r13) 2784 cmpwi r10,PACA_IRQ_DEC 2785 bne 1f 2786 lis r10,0x7fff 2787 ori r10,r10,0xffff 2788 mtspr SPRN_DEC,r10 2789#ifdef CONFIG_PPC_WATCHDOG 2790 b soft_nmi_common 2791#else 2792 b 2f 2793#endif 27941: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2795 beq 2f 2796 xori r12,r12,MSR_EE /* clear MSR_EE */ 2797 .if \hsrr 2798 mtspr SPRN_HSRR1,r12 2799 .else 2800 mtspr SPRN_SRR1,r12 2801 .endif 2802 ori r11,r11,PACA_IRQ_HARD_DIS 2803 stb r11,PACAIRQHAPPENED(r13) 28042: /* done */ 2805 ld r10,PACA_EXGEN+EX_CTR(r13) 2806 mtctr r10 2807 mtcrf 0x80,r9 2808 std r1,PACAR1(r13) 2809 ld r9,PACA_EXGEN+EX_R9(r13) 2810 ld r10,PACA_EXGEN+EX_R10(r13) 2811 ld r11,PACA_EXGEN+EX_R11(r13) 2812 ld r12,PACA_EXGEN+EX_R12(r13) 2813 /* returns to kernel where r13 must be set up, so don't restore it */ 2814 .if \hsrr 2815 HRFI_TO_KERNEL 2816 .else 2817 RFI_TO_KERNEL 2818 .endif 2819 b . 
.endm

TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
2857 */ 28581: 2859 ld r11,(0x80 + 8)*0(r10) 2860 ld r11,(0x80 + 8)*1(r10) 2861 ld r11,(0x80 + 8)*2(r10) 2862 ld r11,(0x80 + 8)*3(r10) 2863 ld r11,(0x80 + 8)*4(r10) 2864 ld r11,(0x80 + 8)*5(r10) 2865 ld r11,(0x80 + 8)*6(r10) 2866 ld r11,(0x80 + 8)*7(r10) 2867 addi r10,r10,0x80*8 2868 bdnz 1b 2869 2870 mtctr r9 2871 ld r9,PACA_EXRFI+EX_R9(r13) 2872 ld r10,PACA_EXRFI+EX_R10(r13) 2873 ld r11,PACA_EXRFI+EX_R11(r13) 2874 ld r1,PACA_EXRFI+EX_R12(r13) 2875 GET_SCRATCH0(r13); 2876 rfid 2877 2878TRAMP_REAL_BEGIN(hrfi_flush_fallback) 2879 SET_SCRATCH0(r13); 2880 GET_PACA(r13); 2881 std r1,PACA_EXRFI+EX_R12(r13) 2882 ld r1,PACAKSAVE(r13) 2883 std r9,PACA_EXRFI+EX_R9(r13) 2884 std r10,PACA_EXRFI+EX_R10(r13) 2885 std r11,PACA_EXRFI+EX_R11(r13) 2886 mfctr r9 2887 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2888 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2889 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2890 mtctr r11 2891 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2892 2893 /* order ld/st prior to dcbt stop all streams with flushing */ 2894 sync 2895 2896 /* 2897 * The load adresses are at staggered offsets within cachelines, 2898 * which suits some pipelines better (on others it should not 2899 * hurt). 2900 */ 29011: 2902 ld r11,(0x80 + 8)*0(r10) 2903 ld r11,(0x80 + 8)*1(r10) 2904 ld r11,(0x80 + 8)*2(r10) 2905 ld r11,(0x80 + 8)*3(r10) 2906 ld r11,(0x80 + 8)*4(r10) 2907 ld r11,(0x80 + 8)*5(r10) 2908 ld r11,(0x80 + 8)*6(r10) 2909 ld r11,(0x80 + 8)*7(r10) 2910 addi r10,r10,0x80*8 2911 bdnz 1b 2912 2913 mtctr r9 2914 ld r9,PACA_EXRFI+EX_R9(r13) 2915 ld r10,PACA_EXRFI+EX_R10(r13) 2916 ld r11,PACA_EXRFI+EX_R11(r13) 2917 ld r1,PACA_EXRFI+EX_R12(r13) 2918 GET_SCRATCH0(r13); 2919 hrfid 2920 2921USE_TEXT_SECTION() 2922 MASKED_INTERRUPT 2923 MASKED_INTERRUPT hsrr=1 2924 2925#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 2926kvmppc_skip_interrupt: 2927 /* 2928 * Here all GPRs are unchanged from when the interrupt happened 2929 * except for r13, which is saved in SPRG_SCRATCH0. 
2930 */ 2931 mfspr r13, SPRN_SRR0 2932 addi r13, r13, 4 2933 mtspr SPRN_SRR0, r13 2934 GET_SCRATCH0(r13) 2935 RFI_TO_KERNEL 2936 b . 2937 2938kvmppc_skip_Hinterrupt: 2939 /* 2940 * Here all GPRs are unchanged from when the interrupt happened 2941 * except for r13, which is saved in SPRG_SCRATCH0. 2942 */ 2943 mfspr r13, SPRN_HSRR0 2944 addi r13, r13, 4 2945 mtspr SPRN_HSRR0, r13 2946 GET_SCRATCH0(r13) 2947 HRFI_TO_KERNEL 2948 b . 2949#endif 2950 2951 /* 2952 * Relocation-on interrupts: A subset of the interrupts can be delivered 2953 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering 2954 * it. Addresses are the same as the original interrupt addresses, but 2955 * offset by 0xc000000000004000. 2956 * It's impossible to receive interrupts below 0x300 via this mechanism. 2957 * KVM: None of these traps are from the guest ; anything that escalated 2958 * to HV=1 from HV=0 is delivered via real mode handlers. 2959 */ 2960 2961 /* 2962 * This uses the standard macro, since the original 0x300 vector 2963 * only has extra guff for STAB-based processors -- which never 2964 * come here. 2965 */ 2966 2967EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline) 2968 b __ppc64_runlatch_on 2969 2970USE_FIXED_SECTION(virt_trampolines) 2971 /* 2972 * The __end_interrupts marker must be past the out-of-line (OOL) 2973 * handlers, so that they are copied to real address 0x100 when running 2974 * a relocatable kernel. This ensures they can be reached from the short 2975 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch 2976 * directly, without using LOAD_HANDLER(). 2977 */ 2978 .align 7 2979 .globl __end_interrupts 2980__end_interrupts: 2981DEFINE_FIXED_SYMBOL(__end_interrupts) 2982 2983#ifdef CONFIG_PPC_970_NAP 2984 /* 2985 * Called by exception entry code if _TLF_NAPPING was set, this clears 2986 * the NAPPING flag, and redirects the exception exit to 2987 * power4_fixup_nap_return. 
2988 */ 2989 .globl power4_fixup_nap 2990EXC_COMMON_BEGIN(power4_fixup_nap) 2991 andc r9,r9,r10 2992 std r9,TI_LOCAL_FLAGS(r11) 2993 LOAD_REG_ADDR(r10, power4_idle_nap_return) 2994 std r10,_NIP(r1) 2995 blr 2996 2997power4_idle_nap_return: 2998 blr 2999#endif 3000 3001CLOSE_FIXED_SECTION(real_vectors); 3002CLOSE_FIXED_SECTION(real_trampolines); 3003CLOSE_FIXED_SECTION(virt_vectors); 3004CLOSE_FIXED_SECTION(virt_trampolines); 3005 3006USE_TEXT_SECTION() 3007 3008/* MSR[RI] should be clear because this uses SRR[01] */ 3009enable_machine_check: 3010 mflr r0 3011 bcl 20,31,$+4 30120: mflr r3 3013 addi r3,r3,(1f - 0b) 3014 mtspr SPRN_SRR0,r3 3015 mfmsr r3 3016 ori r3,r3,MSR_ME 3017 mtspr SPRN_SRR1,r3 3018 RFI_TO_KERNEL 30191: mtlr r0 3020 blr 3021 3022/* MSR[RI] should be clear because this uses SRR[01] */ 3023disable_machine_check: 3024 mflr r0 3025 bcl 20,31,$+4 30260: mflr r3 3027 addi r3,r3,(1f - 0b) 3028 mtspr SPRN_SRR0,r3 3029 mfmsr r3 3030 li r4,MSR_ME 3031 andc r3,r3,r4 3032 mtspr SPRN_SRR1,r3 3033 RFI_TO_KERNEL 30341: mtlr r0 3035 blr 3036 3037/* 3038 * Hash table stuff 3039 */ 3040 .balign IFETCH_ALIGN_BYTES 3041do_hash_page: 3042#ifdef CONFIG_PPC_BOOK3S_64 3043 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h 3044 ori r0,r0,DSISR_BAD_FAULT_64S@l 3045 and. r0,r5,r0 /* weird error? */ 3046 bne- handle_page_fault /* if not, try to insert a HPTE */ 3047 ld r11, PACA_THREAD_INFO(r13) 3048 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 3049 andis. r0,r0,NMI_MASK@h /* (i.e. 
an irq when soft-disabled) */ 3050 bne 77f /* then don't call hash_page now */ 3051 3052 /* 3053 * r3 contains the trap number 3054 * r4 contains the faulting address 3055 * r5 contains dsisr 3056 * r6 msr 3057 * 3058 * at return r3 = 0 for success, 1 for page fault, negative for error 3059 */ 3060 bl __hash_page /* build HPTE if possible */ 3061 cmpdi r3,0 /* see if __hash_page succeeded */ 3062 3063 /* Success */ 3064 beq interrupt_return /* Return from exception on success */ 3065 3066 /* Error */ 3067 blt- 13f 3068 3069 /* Reload DAR/DSISR into r4/r5 for the DABR check below */ 3070 ld r4,_DAR(r1) 3071 ld r5,_DSISR(r1) 3072#endif /* CONFIG_PPC_BOOK3S_64 */ 3073 3074/* Here we have a page fault that hash_page can't handle. */ 3075handle_page_fault: 307611: andis. r0,r5,DSISR_DABRMATCH@h 3077 bne- handle_dabr_fault 3078 addi r3,r1,STACK_FRAME_OVERHEAD 3079 bl do_page_fault 3080 cmpdi r3,0 3081 beq+ interrupt_return 3082 mr r5,r3 3083 addi r3,r1,STACK_FRAME_OVERHEAD 3084 ld r4,_DAR(r1) 3085 bl bad_page_fault 3086 b interrupt_return 3087 3088/* We have a data breakpoint exception - handle it */ 3089handle_dabr_fault: 3090 ld r4,_DAR(r1) 3091 ld r5,_DSISR(r1) 3092 addi r3,r1,STACK_FRAME_OVERHEAD 3093 bl do_break 3094 /* 3095 * do_break() may have changed the NV GPRS while handling a breakpoint. 3096 * If so, we need to restore them with their updated values. 3097 */ 3098 REST_NVGPRS(r1) 3099 b interrupt_return 3100 3101 3102#ifdef CONFIG_PPC_BOOK3S_64 3103/* We have a page fault that hash_page could handle but HV refused 3104 * the PTE insertion 3105 */ 310613: mr r5,r3 3107 addi r3,r1,STACK_FRAME_OVERHEAD 3108 ld r4,_DAR(r1) 3109 bl low_hash_fault 3110 b interrupt_return 3111#endif 3112 3113/* 3114 * We come here as a result of a DSI at a point where we don't want 3115 * to call hash_page, such as when we are accessing memory (possibly 3116 * user memory) inside a PMU interrupt that occurred while interrupts 3117 * were soft-disabled. 
We want to invoke the exception handler for 3118 * the access, or panic if there isn't a handler. 3119 */ 312077: addi r3,r1,STACK_FRAME_OVERHEAD 3121 li r5,SIGSEGV 3122 bl bad_page_fault 3123 b interrupt_return 3124