/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name, text);			\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l;			\
	addis	reg,reg,(ABS_ADDR(label, section))@h
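
/*
 * Illustrative sketch (not assembled) of what LOAD_HANDLER expands to for
 * a handler placed within the first 64KB of the kernel, say at an offset
 * of 0x5f00 (a made-up value for illustration):
 *
 *	ld	r10,PACAKBASE(r13)	// r10 = 64K-aligned kernel base
 *	ori	r10,r10,0x5f00		// or in the label's low 16 bits
 *
 * ori only takes a 16-bit unsigned immediate, which is why the target must
 * lie within 64KB of a 64K-aligned kernelbase. __LOAD_FAR_HANDLER spends
 * one extra instruction (addis with the @h part) to reach targets beyond
 * that window.
 */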

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define ICFAR		.L_ICFAR_\name\()	/* Uses CFAR */
#define ICFAR_IF_HVMODE	.L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef ICFAR
		ICFAR=1
	.endif
	.ifndef ICFAR_IF_HVMODE
		ICFAR_IF_HVMODE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
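
/*
 * A sketch of how this definition machinery is used (a definition of this
 * exact shape appears later in this file for the 0x300 data access
 * interrupt):
 *
 *	INT_DEFINE_BEGIN(data_access)
 *		IVEC=0x300
 *		IDAR=1
 *		IDSISR=1
 *		IKVM_REAL=1
 *	INT_DEFINE_END(data_access)
 *
 * This assigns assembler symbols such as .L_IVEC_data_access=0x300, and
 * do_define_int fills in defaults (IAREA=PACA_EXGEN, ISTACK=1, IKUAP=1,
 * etc.) for anything the definition leaves unset. The GEN_* macros below
 * then read those symbols through the IVEC/IHSRR/... #defines.
 */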

/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
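
/*
 * Worked example of the trap-number convention above (illustrative only):
 * the hypervisor decrementer uses HSRRs and IVEC=0x980, so KVMTEST loads
 * r10 = 0x980 + 0x2 = 0x982 before branching to the KVM handler. Vector
 * addresses are always multiples of at least 0x20, so the 0x2 bit is free
 * to distinguish HSRR interrupts in the reported trap number.
 */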

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 */
	.if ICFAR
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	BEGIN_FTR_SECTION_NESTED(69)
	mfspr	r10,SPRN_CFAR
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
	BEGIN_FTR_SECTION_NESTED(69)
	li	r10,0
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.endif
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)		/* save r11 - r12 */
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm
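
/*
 * State handed from GEN_INT_ENTRY to the common handler, summarised as a
 * sketch (this restates the contract documented above GEN_COMMON further
 * down, it does not add new behaviour):
 *
 *	r13      = paca pointer (original r13 saved in IAREA+EX_R13)
 *	r9       = saved CR     (original r9 in IAREA+EX_R9)
 *	r11, r12 = [H]SRR0, [H]SRR1
 *	r10      = clobbered    (original r10 in IAREA+EX_R10)
 *	IAREA    = PACA_EXGEN or the interrupt's own save area, holding
 *	           r9-r13, CTR, and optionally PPR/CFAR/DAR/DSISR
 */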

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user */
3:	mr	r10,r1			/* Save r1 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack */
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use */
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe */
	std	r11,_NIP(r1)		/* save SRR0 in stackframe */
	std	r12,_MSR(r1)		/* save SRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode */
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca */
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	.if ICFAR || ICFAR_IF_HVMODE
	ld	r10,IAREA+EX_CFAR(r13)
	.else
	li	r10,0
	.endif
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe */
	SAVE_GPRS(3, 8, r1)		/* save r3 - r8 in stackframe */
	mflr	r9			/* Get LR, later save to stack */
	LOAD_PACA_TOC()			/* get kernel TOC into r2 */
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe */
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number */
	li	r10,0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r10,RESULT(r1)		/* clear regs->result */
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
.endm
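
/*
 * Sketch of the pt_regs frame __GEN_COMMON_BODY builds on the stack (the
 * field names are the usual asm-offsets constants, listed here only as a
 * summary of the stores above):
 *
 *	GPR0..GPR31   volatile GPRs from the PACA save area, r14-r31
 *	              via SAVE_NVGPRS
 *	_NIP, _MSR    from [H]SRR0 and [H]SRR1
 *	_CCR, _CTR, _LINK, _XER
 *	_DAR, _DSISR  only if the interrupt uses them (IDAR/IDSISR)
 *	ORIG_GPR3     reused to stash CFAR, when the CPU has one
 *	_TRAP         IVEC, the vector number
 *	SOFTE         paca->irq_soft_mask at entry
 *	RESULT        cleared to 0
 *
 * STACK_FRAME_REGS_MARKER is stored below the frame so stack walkers can
 * recognise an interrupt frame.
 */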

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm
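
/*
 * Sketch of the table layouts the two SEARCH_* macros above walk, as
 * implied by the loads and strides in the code (the tables themselves are
 * emitted elsewhere, e.g. by the SOFT_MASK_TABLE macro used later in this
 * file):
 *
 *	__restart_table entry, 24 bytes:   { start, end, fixup }
 *		if start <= r11 (NIP) < end, r12 is set to fixup
 *	__soft_mask_table entry, 16 bytes: { start, end }
 *		if start <= r11 (NIP) < end, r12 is set to 1
 *
 * Both searches leave r12=0 when no entry matches.
 */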

/*
 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
 *
 * There's a short window during boot where although the kernel is running
 * little endian, any exceptions will cause the CPU to switch back to big
 * endian. For example a WARN() boils down to a trap instruction, which will
 * cause a program check, and we end up here but with the CPU in big endian
 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
 * a ~3GB displacement from r3. The content of r3 is random, so that is a load
 * from some random location, and depending on the system can easily lead to a
 * checkstop, or an infinitely recursive page fault.
 *
 * So to handle that case we have a trampoline here that can detect we are in
 * the wrong endian and flip us back to the correct endian. We can't flip
 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
 * the paca. SPRG3 is user readable, but this trampoline is only active very
 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
 * userspace starts.
 */
.macro EARLY_BOOT_FIXUP
BEGIN_FTR_SECTION
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	tdi	0,0,0x48	// Trap never, or in reverse endian: b . + 8
	b	2f		// Skip trampoline if endian is correct
	.long 0xa643707d	// mtsprg  0, r11	Backup r11
	.long 0xa6027a7d	// mfsrr0  r11
	.long 0xa643727d	// mtsprg  2, r11	Backup SRR0 in SPRG2
	.long 0xa6027b7d	// mfsrr1  r11
	.long 0xa643737d	// mtsprg  3, r11	Backup SRR1 in SPRG3
	.long 0xa600607d	// mfmsr   r11
	.long 0x01006b69	// xori    r11, r11, 1	Invert MSR[LE]
	.long 0xa6037b7d	// mtsrr1  r11
	/*
	 * This is 'li r11,1f' where 1f is the absolute address of that
	 * label, byteswapped into the SI field of the instruction.
	 */
	.long 0x00006039 | \
		((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
		((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
	.long 0xa6037a7d	// mtsrr0  r11
	.long 0x2400004c	// rfid
1:
	mfsprg	r11, 3
	mtsrr1	r11		// Restore SRR1
	mfsprg	r11, 2
	mtsrr0	r11		// Restore SRR0
	mfsprg	r11, 0		// Restore r11
2:
#endif
	/*
	 * program check could hit at any time, and pseries can not block
	 * MSR[ME] in early boot. So check if there is anything useful in r13
	 * yet, and spin forever if not.
	 */
	mtsprg	0, r11
	mfcr	r11
	cmpdi	r13, 0
	beq	.
	mtcr	r11
	mfsprg	r11, 0
END_FTR_SECTION(0, 1)	// nop out after boot
.endm
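
/*
 * Worked example of the endian trick above, using encodings already
 * present in the macro (shown for illustration only):
 *
 * - "tdi 0,0,0x48" assembles to 0x08000048. Fetched in the wrong endian
 *   the bytes read back as 0x48000008, which is "b . + 8": a never-taken
 *   trap becomes a branch over the "b 2f" and into the byteswapped
 *   trampoline.
 * - ".long 0xa643707d" read in the opposite endian is 0x7d7043a6, i.e.
 *   "mtsprg 0, r11", which is why the trampoline body is emitted as raw
 *   .long values rather than mnemonics.
 */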

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one
 * of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As a crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, SLB or MCE will be able to recover, but a nested
	 * NMI will notice in_nmi and not recover because of the use of the NMI
	 * stack. in_nmi reentrancy is tested in system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL
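
/*
 * Worked note on the idle-wake test used in the system reset entry above
 * (an illustration of the existing encoding, not new behaviour):
 * "rlwinm. r5,r3,47-31,30,31" rotates the low word of SRR1 left by 16 and
 * keeps only the low two bits, leaving SRR1[46:47] (the wake reason field)
 * in r5. r5 = 0 means not a powersave wakeup; the branches above and in
 * system_reset_idle_wake treat r5 >= 2 as a wakeup with state loss.
 */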

/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area,
 * the difference is re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS
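
/*
 * Summary sketch of the two definitions above: the same 0x200 vector is
 * entered via machine_check_early (real-mode common handler, MC emergency
 * stack, no KUAP). If the event must be delivered to host user or guest,
 * MACHINE_CHECK_HANDLER_WINDUP unwinds everything and re-enters through
 * the regular machine_check definition, as if the interrupt had just been
 * taken (see mce_deliver below).
 */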

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_FTR_SECTION
	bl	machine_check_early_boot
END_FTR_SECTION(0, 1)	// nop out after boot
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.

/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr
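
/*
 * Sketch of the dispatch above: _DSISR is tested for DSISR_DABRMATCH
 * before either MMU fault path, so a DAWR/DABR watchpoint hit is routed
 * to do_break() (which may update NV GPRs, hence the REST_NVGPRS) rather
 * than to do_hash_fault()/do_page_fault().
 */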

/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr
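
/*
 * Note on IISIDE above (a restatement of the flag's effect, for clarity):
 * with IISIDE=1 the common body fills _DAR from _NIP (SRR0) and _DSISR
 * from _MSR (SRR1) masked with DSISR_SRR1_MATCH_64S, so the generic fault
 * code sees a uniform DAR/DSISR view for both data and instruction faults.
 */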

/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr
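
/*
 * Sketch of the SLB fault contract shared by the 0x380 and 0x480 handlers
 * above (hash MMU case): do_slb_fault() returns 0 on a successful refill,
 * in which case the handler exits via fast_interrupt_return_srr without a
 * full fault; a non-zero error is stored in RESULT and routed to
 * do_bad_segment_interrupt() like a bad page fault.
 */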

/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers, guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
 * because registers at the time of the interrupt are not so important as it is
 * asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not required because this is an asynchronous interrupt that in
 * general won't have much bearing on the state of the CPU, with the possible
 * exception of crash/debug IPIs, but those are generally moving to use SRESET
 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
 * it may be exiting the guest and need CFAR to be saved.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
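
/*
 * Note on the return path selection above (restating the LPES convention
 * from the comment at the top of this handler): in HV mode 0x500 is
 * delivered with HSRRs and must return via interrupt_return_hsrr;
 * otherwise (e.g., a pseries guest) SRRs were used, hence the feature
 * section choosing between the two.
 */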
r10,r12,MSR_PR /* Set CR0 correctly for label */ 1721 /* 3 in EXCEPTION_PROLOG_COMMON */ 1722 mr r10,r1 /* Save r1 */ 1723 ld r1,PACAEMERGSP(r13) /* Use emergency stack */ 1724 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1725 __ISTACK(program_check)=0 1726 __GEN_COMMON_BODY program_check 1727 b .Ldo_program_check 1728 1729.Lnormal_stack: 1730 __ISTACK(program_check)=1 1731 __GEN_COMMON_BODY program_check 1732 1733.Ldo_program_check: 1734 addi r3,r1,STACK_FRAME_OVERHEAD 1735 bl program_check_exception 1736 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1737 b interrupt_return_srr 1738 1739 1740/* 1741 * Interrupt 0x800 - Floating-Point Unavailable Interrupt. 1742 * This is a synchronous interrupt in response to executing an fp instruction 1743 * with MSR[FP]=0. 1744 * 1745 * Handling: 1746 * This will load FP registers and enable the FP bit if coming from userspace, 1747 * otherwise report a bad kernel use of FP. 1748 */ 1749INT_DEFINE_BEGIN(fp_unavailable) 1750 IVEC=0x800 1751#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1752 IKVM_REAL=1 1753#endif 1754INT_DEFINE_END(fp_unavailable) 1755 1756EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1757 GEN_INT_ENTRY fp_unavailable, virt=0 1758EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1759EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1760 GEN_INT_ENTRY fp_unavailable, virt=1 1761EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1762EXC_COMMON_BEGIN(fp_unavailable_common) 1763 GEN_COMMON fp_unavailable 1764 bne 1f /* if from user, just load it up */ 1765 addi r3,r1,STACK_FRAME_OVERHEAD 1766 bl kernel_fp_unavailable_exception 17670: trap 1768 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17691: 1770#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1771BEGIN_FTR_SECTION 1772 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1773 * transaction), go do TM stuff 1774 */ 1775 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1776 bne- 2f 1777END_FTR_SECTION_IFSET(CPU_FTR_TM) 1778#endif 1779 bl load_up_fpu 1780 b fast_interrupt_return_srr 1781#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17822: /* User process was in a transaction */ 1783 addi r3,r1,STACK_FRAME_OVERHEAD 1784 bl fp_unavailable_tm 1785 b interrupt_return_srr 1786#endif 1787 1788 1789/** 1790 * Interrupt 0x900 - Decrementer Interrupt. 1791 * This is an asynchronous interrupt in response to a decrementer exception 1792 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1793 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1794 * local_irq_disable()). 1795 * 1796 * Handling: 1797 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1798 * 1799 * If soft masked, the masked handler will note the pending interrupt for 1800 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1801 * in the interrupted context. 1802 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1803 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1804 * on the emergency stack. 1805 * 1806 * CFAR is not required because this is asynchronous (see hardware_interrupt). 1807 * A watchdog interrupt may like to have CFAR, but usually the interesting 1808 * branch is long gone by that point (e.g., infinite loop). 
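 *
 * The "bump" can be seen in the MASKED_INTERRUPT macro below; in essence
 * (sketch):
 *   LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
 *   mtspr SPRN_DEC,r9
 * so the DEC will not wrap again before the interrupt is replayed.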
1809 */ 1810INT_DEFINE_BEGIN(decrementer) 1811 IVEC=0x900 1812 IMASK=IRQS_DISABLED 1813#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1814 IKVM_REAL=1 1815#endif 1816 ICFAR=0 1817INT_DEFINE_END(decrementer) 1818 1819EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1820 GEN_INT_ENTRY decrementer, virt=0 1821EXC_REAL_END(decrementer, 0x900, 0x80) 1822EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1823 GEN_INT_ENTRY decrementer, virt=1 1824EXC_VIRT_END(decrementer, 0x4900, 0x80) 1825EXC_COMMON_BEGIN(decrementer_common) 1826 GEN_COMMON decrementer 1827 addi r3,r1,STACK_FRAME_OVERHEAD 1828 bl timer_interrupt 1829 b interrupt_return_srr 1830 1831 1832/** 1833 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1834 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1835 * register. 1836 * 1837 * Handling: 1838 * Linux does not use this outside KVM where it's used to keep a host timer 1839 * while the guest is given control of DEC. It should normally be caught by 1840 * the KVM test and routed there. 1841 */ 1842INT_DEFINE_BEGIN(hdecrementer) 1843 IVEC=0x980 1844 IHSRR=1 1845 ISTACK=0 1846 IKVM_REAL=1 1847 IKVM_VIRT=1 1848INT_DEFINE_END(hdecrementer) 1849 1850EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1851 GEN_INT_ENTRY hdecrementer, virt=0 1852EXC_REAL_END(hdecrementer, 0x980, 0x80) 1853EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1854 GEN_INT_ENTRY hdecrementer, virt=1 1855EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1856EXC_COMMON_BEGIN(hdecrementer_common) 1857 __GEN_COMMON_ENTRY hdecrementer 1858 /* 1859 * Hypervisor decrementer interrupts not caught by the KVM test 1860 * shouldn't occur but are sometimes left pending on exit from a KVM 1861 * guest. We don't need to do anything to clear them, as they are 1862 * edge-triggered. 1863 * 1864 * Be careful to avoid touching the kernel stack. 1865 */ 1866 li r10,0 1867 stb r10,PACAHSRR_VALID(r13) 1868 ld r10,PACA_EXGEN+EX_CTR(r13) 1869 mtctr r10 1870 mtcrf 0x80,r9 1871 ld r9,PACA_EXGEN+EX_R9(r13) 1872 ld r10,PACA_EXGEN+EX_R10(r13) 1873 ld r11,PACA_EXGEN+EX_R11(r13) 1874 ld r12,PACA_EXGEN+EX_R12(r13) 1875 ld r13,PACA_EXGEN+EX_R13(r13) 1876 HRFI_TO_KERNEL 1877 1878 1879/** 1880 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1881 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1882 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1883 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1884 * 1885 * Handling: 1886 * Guests may use this for IPIs between threads in a core if the 1887 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1888 * 1889 * If soft masked, the masked handler will note the pending interrupt for 1890 * replay, leaving MSR[EE] enabled in the interrupted context because the 1891 * doorbells are edge triggered. 1892 * 1893 * CFAR is not required, similarly to hardware_interrupt. 
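 *
 * (For reference, the sending side raises this doorbell with the msgsndp
 * instruction, targeting a sibling thread of the same core.)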
1894 */
1895INT_DEFINE_BEGIN(doorbell_super)
1896 IVEC=0xa00
1897 IMASK=IRQS_DISABLED
1898#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1899 IKVM_REAL=1
1900#endif
1901 ICFAR=0
1902INT_DEFINE_END(doorbell_super)
1903
1904EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
1905 GEN_INT_ENTRY doorbell_super, virt=0
1906EXC_REAL_END(doorbell_super, 0xa00, 0x100)
1907EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
1908 GEN_INT_ENTRY doorbell_super, virt=1
1909EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
1910EXC_COMMON_BEGIN(doorbell_super_common)
1911 GEN_COMMON doorbell_super
1912 addi r3,r1,STACK_FRAME_OVERHEAD
1913#ifdef CONFIG_PPC_DOORBELL
1914 bl doorbell_exception
1915#else
1916 bl unknown_async_exception
1917#endif
1918 b interrupt_return_srr
1919
1920
1921EXC_REAL_NONE(0xb00, 0x100)
1922EXC_VIRT_NONE(0x4b00, 0x100)
1923
1924/**
1925 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
1926 * This is a synchronous interrupt invoked with the "sc" instruction. The
1927 * system call is invoked with "sc 0" and does not alter the HV bit, so it
1928 * is directed to the currently running OS. The hypercall is invoked with
1929 * "sc 1" and it sets HV=1, so it elevates to the hypervisor.
1930 *
1931 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1932 * 0x4c00 virtual mode.
1933 *
1934 * Handling:
1935 * If the KVM test fires then it was due to a hypercall and is accordingly
1936 * routed to KVM. Otherwise this executes a normal Linux system call.
1937 *
1938 * Call convention:
1939 *
1940 * The syscall and hypercall register conventions are documented in
1941 * Documentation/powerpc/syscall64-abi.rst and
1942 * Documentation/powerpc/papr_hcalls.rst respectively.
1943 *
1944 * The intersection of volatile registers that don't contain possible
1945 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1946 * without saving; xer is best avoided, though, as hardware may interpret
1947 * some of its bits, so changing them can be costly.
1948 */
1949INT_DEFINE_BEGIN(system_call)
1950 IVEC=0xc00
1951 IKVM_REAL=1
1952 IKVM_VIRT=1
1953 ICFAR=0
1954INT_DEFINE_END(system_call)
1955
1956.macro SYSTEM_CALL virt
1957#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1958 /*
1959 * There is a little bit of juggling to get syscall and hcall
1960 * working well. Save r13 in ctr to avoid using a SPRG scratch
1961 * register.
1962 *
1963 * Userspace syscalls have already saved the PPR; hcalls must save
1964 * it before setting HMT_MEDIUM.
1965 */
1966 mtctr r13
1967 GET_PACA(r13)
1968 std r10,PACA_EXGEN+EX_R10(r13)
1969 INTERRUPT_TO_KERNEL
1970 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
1971 mfctr r9
1972#else
1973 mr r9,r13
1974 GET_PACA(r13)
1975 INTERRUPT_TO_KERNEL
1976#endif
1977
1978#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1979BEGIN_FTR_SECTION
1980 cmpdi r0,0x1ebe
1981 beq- 1f
1982END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1983#endif
1984
1985 /* We reach here with PACA in r13, r13 in r9. */
1986 mfspr r11,SPRN_SRR0
1987 mfspr r12,SPRN_SRR1
1988
1989 HMT_MEDIUM
1990
1991 .if ! \virt
1992 __LOAD_HANDLER(r10, system_call_common_real, real_vectors)
1993 mtctr r10
1994 bctr
1995 .else
1996#ifdef CONFIG_RELOCATABLE
1997 __LOAD_HANDLER(r10, system_call_common, virt_vectors)
1998 mtctr r10
1999 bctr
2000#else
2001 b system_call_common
2002#endif
2003 .endif
2004
2005#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
2006 /* Fast LE/BE switch system call */
20071: mfspr r12,SPRN_SRR1
2008 xori r12,r12,MSR_LE
2009 mtspr SPRN_SRR1,r12
2010 mr r13,r9
2011 RFI_TO_USER /* return to userspace */
2012 b . /* prevent speculative execution */
2013#endif
2014.endm
2015
2016EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
2017 SYSTEM_CALL 0
2018EXC_REAL_END(system_call, 0xc00, 0x100)
2019EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
2020 SYSTEM_CALL 1
2021EXC_VIRT_END(system_call, 0x4c00, 0x100)
2022
2023#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2024TRAMP_REAL_BEGIN(kvm_hcall)
2025 std r9,PACA_EXGEN+EX_R9(r13)
2026 std r11,PACA_EXGEN+EX_R11(r13)
2027 std r12,PACA_EXGEN+EX_R12(r13)
2028 mfcr r9
2029 mfctr r10
2030 std r10,PACA_EXGEN+EX_R13(r13)
2031 li r10,0
2032 std r10,PACA_EXGEN+EX_CFAR(r13)
2033 std r10,PACA_EXGEN+EX_CTR(r13)
2034 /*
2035 * Save the PPR (on systems that support it) before changing to
2036 * HMT_MEDIUM. That allows the KVM code to save that value into the
2037 * guest state (it is the guest's PPR value).
2038 */
2039BEGIN_FTR_SECTION
2040 mfspr r10,SPRN_PPR
2041 std r10,PACA_EXGEN+EX_PPR(r13)
2042END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2043
2044 HMT_MEDIUM
2045
2046#ifdef CONFIG_RELOCATABLE
2047 /*
2048 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
2049 * outside the head section.
2050 */
2051 __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
2052 mtctr r10
2053 bctr
2054#else
2055 b kvmppc_hcall
2056#endif
2057#endif
2058
2059/**
2060 * Interrupt 0xd00 - Trace Interrupt.
2061 * This is a synchronous interrupt in response to instruction step or
2062 * breakpoint faults.
2063 */
2064INT_DEFINE_BEGIN(single_step)
2065 IVEC=0xd00
2066#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2067 IKVM_REAL=1
2068#endif
2069INT_DEFINE_END(single_step)
2070
2071EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
2072 GEN_INT_ENTRY single_step, virt=0
2073EXC_REAL_END(single_step, 0xd00, 0x100)
2074EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
2075 GEN_INT_ENTRY single_step, virt=1
2076EXC_VIRT_END(single_step, 0x4d00, 0x100)
2077EXC_COMMON_BEGIN(single_step_common)
2078 GEN_COMMON single_step
2079 addi r3,r1,STACK_FRAME_OVERHEAD
2080 bl single_step_exception
2081 b interrupt_return_srr
2082
2083
2084/**
2085 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
2086 * This is a synchronous interrupt in response to an MMU fault caused by a
2087 * guest data access.
2088 *
2089 * Handling:
2090 * This should always get routed to KVM. In radix MMU mode, this is caused
2091 * by a guest nested radix access that can't be performed due to the
2092 * partition scope page table. In hash mode, this can be caused by guests
2093 * running with translation disabled (virtual real mode) or with VPM enabled.
2094 * KVM will update the page table structures or disallow the access.
2095 */
2096INT_DEFINE_BEGIN(h_data_storage)
2097 IVEC=0xe00
2098 IHSRR=1
2099 IDAR=1
2100 IDSISR=1
2101 IKVM_REAL=1
2102 IKVM_VIRT=1
2103INT_DEFINE_END(h_data_storage)
2104
2105EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
2106 GEN_INT_ENTRY h_data_storage, virt=0, ool=1
2107EXC_REAL_END(h_data_storage, 0xe00, 0x20)
2108EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
2109 GEN_INT_ENTRY h_data_storage, virt=1, ool=1
2110EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
2111EXC_COMMON_BEGIN(h_data_storage_common)
2112 GEN_COMMON h_data_storage
2113 addi r3,r1,STACK_FRAME_OVERHEAD
2114BEGIN_MMU_FTR_SECTION
2115 bl do_bad_page_fault_segv
2116MMU_FTR_SECTION_ELSE
2117 bl unknown_exception
2118ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
2119 b interrupt_return_hsrr
2120
2121
2122/**
2123 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
2124 * This is a synchronous interrupt in response to an MMU fault caused by a
2125 * guest instruction fetch, similar to HDSI.
2126 */
2127INT_DEFINE_BEGIN(h_instr_storage)
2128 IVEC=0xe20
2129 IHSRR=1
2130 IKVM_REAL=1
2131 IKVM_VIRT=1
2132INT_DEFINE_END(h_instr_storage)
2133
2134EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
2135 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
2136EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
2137EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
2138 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
2139EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
2140EXC_COMMON_BEGIN(h_instr_storage_common)
2141 GEN_COMMON h_instr_storage
2142 addi r3,r1,STACK_FRAME_OVERHEAD
2143 bl unknown_exception
2144 b interrupt_return_hsrr
2145
2146
2147/**
2148 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
2149 */
2150INT_DEFINE_BEGIN(emulation_assist)
2151 IVEC=0xe40
2152 IHSRR=1
2153 IKVM_REAL=1
2154 IKVM_VIRT=1
2155INT_DEFINE_END(emulation_assist)
2156
2157EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2158 GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2159EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2160EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2161 GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2162EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2163EXC_COMMON_BEGIN(emulation_assist_common)
2164 GEN_COMMON emulation_assist
2165 addi r3,r1,STACK_FRAME_OVERHEAD
2166 bl emulation_assist_interrupt
2167 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2168 b interrupt_return_hsrr
2169
2170
2171/**
2172 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2173 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2174 * Exception. It is always taken in real mode but, unlike SRESET and MCE,
2175 * uses the HSRR registers.
2176 *
2177 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2178 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2179 *
2180 * Handling:
2181 * This is a special case: it is handled similarly to machine checks, with an
2182 * initial real mode handler that is not soft-masked and attempts to fix the
2183 * problem, followed by a regular handler which is soft-maskable and reports
2184 * the problem.
2185 *
2186 * The emergency stack is used for the early real mode handler.
2187 *
2188 * XXX: it is unclear why the MCE and HMI schemes could not be made common;
2189 * e.g., either use soft-masking for the MCE, or use irq_work for the HMI.
2190 *
2191 * KVM:
2192 * Unlike MCE, this calls into KVM without calling the real mode handler
2193 * first.
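 *
 * Flow sketch: hmi_exception_early_common below runs on the emergency
 * stack and calls hmi_exception_realmode(); if that returns zero the
 * interrupted context is resumed directly, otherwise the registers are
 * restored and the regular soft-maskable hmi_exception handler is entered
 * to pull the event from firmware and report it.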
2194 */ 2195INT_DEFINE_BEGIN(hmi_exception_early) 2196 IVEC=0xe60 2197 IHSRR=1 2198 IREALMODE_COMMON=1 2199 ISTACK=0 2200 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 2201 IKVM_REAL=1 2202INT_DEFINE_END(hmi_exception_early) 2203 2204INT_DEFINE_BEGIN(hmi_exception) 2205 IVEC=0xe60 2206 IHSRR=1 2207 IMASK=IRQS_DISABLED 2208 IKVM_REAL=1 2209INT_DEFINE_END(hmi_exception) 2210 2211EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) 2212 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1 2213EXC_REAL_END(hmi_exception, 0xe60, 0x20) 2214EXC_VIRT_NONE(0x4e60, 0x20) 2215 2216EXC_COMMON_BEGIN(hmi_exception_early_common) 2217 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early 2218 2219 mr r10,r1 /* Save r1 */ 2220 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ 2221 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 2222 2223 __GEN_COMMON_BODY hmi_exception_early 2224 2225 addi r3,r1,STACK_FRAME_OVERHEAD 2226 bl hmi_exception_realmode 2227 cmpdi cr0,r3,0 2228 bne 1f 2229 2230 EXCEPTION_RESTORE_REGS hsrr=1 2231 HRFI_TO_USER_OR_KERNEL 2232 22331: 2234 /* 2235 * Go to virtual mode and pull the HMI event information from 2236 * firmware. 2237 */ 2238 EXCEPTION_RESTORE_REGS hsrr=1 2239 GEN_INT_ENTRY hmi_exception, virt=0 2240 2241EXC_COMMON_BEGIN(hmi_exception_common) 2242 GEN_COMMON hmi_exception 2243 addi r3,r1,STACK_FRAME_OVERHEAD 2244 bl handle_hmi_exception 2245 b interrupt_return_hsrr 2246 2247 2248/** 2249 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt. 2250 * This is an asynchronous interrupt in response to a msgsnd doorbell. 2251 * Similar to the 0xa00 doorbell but for host rather than guest. 2252 * 2253 * CFAR is not required (similar to doorbell_interrupt), unless KVM HV 2254 * is enabled, in which case it may be a guest exit. Most PowerNV kernels 2255 * include KVM support so it would be nice if this could be dynamically 2256 * patched out if KVM was not currently running any guests. 2257 */ 2258INT_DEFINE_BEGIN(h_doorbell) 2259 IVEC=0xe80 2260 IHSRR=1 2261 IMASK=IRQS_DISABLED 2262 IKVM_REAL=1 2263 IKVM_VIRT=1 2264#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2265 ICFAR=0 2266#endif 2267INT_DEFINE_END(h_doorbell) 2268 2269EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) 2270 GEN_INT_ENTRY h_doorbell, virt=0, ool=1 2271EXC_REAL_END(h_doorbell, 0xe80, 0x20) 2272EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) 2273 GEN_INT_ENTRY h_doorbell, virt=1, ool=1 2274EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) 2275EXC_COMMON_BEGIN(h_doorbell_common) 2276 GEN_COMMON h_doorbell 2277 addi r3,r1,STACK_FRAME_OVERHEAD 2278#ifdef CONFIG_PPC_DOORBELL 2279 bl doorbell_exception 2280#else 2281 bl unknown_async_exception 2282#endif 2283 b interrupt_return_hsrr 2284 2285 2286/** 2287 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt. 2288 * This is an asynchronous interrupt in response to an "external exception". 2289 * Similar to 0x500 but for host only. 2290 * 2291 * Like h_doorbell, CFAR is only required for KVM HV because this can be 2292 * a guest exit. 
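 *
 * (These only arrive on systems where LPCR[HVICE] is set; see the 0x500
 * comment above.)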
2293 */
2294INT_DEFINE_BEGIN(h_virt_irq)
2295 IVEC=0xea0
2296 IHSRR=1
2297 IMASK=IRQS_DISABLED
2298 IKVM_REAL=1
2299 IKVM_VIRT=1
2300#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2301 ICFAR=0
2302#endif
2303INT_DEFINE_END(h_virt_irq)
2304
2305EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2306 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2307EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2308EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2309 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2310EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2311EXC_COMMON_BEGIN(h_virt_irq_common)
2312 GEN_COMMON h_virt_irq
2313 addi r3,r1,STACK_FRAME_OVERHEAD
2314 bl do_IRQ
2315 b interrupt_return_hsrr
2316
2317
2318EXC_REAL_NONE(0xec0, 0x20)
2319EXC_VIRT_NONE(0x4ec0, 0x20)
2320EXC_REAL_NONE(0xee0, 0x20)
2321EXC_VIRT_NONE(0x4ee0, 0x20)
2322
2323
2324/*
2325 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2326 * This is an asynchronous interrupt in response to a PMU exception.
2327 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2328 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2329 *
2330 * Handling:
2331 * This calls into the perf subsystem.
2332 *
2333 * Like the watchdog soft-nmi, it appears to Linux as an NMI interrupt, in
2334 * that it runs under local_irq_disable(). However, it may be soft-masked in
2335 * powerpc-specific code.
2336 *
2337 * If soft masked, the masked handler will note the pending interrupt for
2338 * replay, and clear MSR[EE] in the interrupted context.
2339 *
2340 * CFAR is not used by perf interrupts, so it is not required.
2341 */
2342INT_DEFINE_BEGIN(performance_monitor)
2343 IVEC=0xf00
2344 IMASK=IRQS_PMI_DISABLED
2345#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2346 IKVM_REAL=1
2347#endif
2348 ICFAR=0
2349INT_DEFINE_END(performance_monitor)
2350
2351EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2352 GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2353EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2354EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2355 GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2356EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2357EXC_COMMON_BEGIN(performance_monitor_common)
2358 GEN_COMMON performance_monitor
2359 addi r3,r1,STACK_FRAME_OVERHEAD
2360 bl performance_monitor_exception
2361 b interrupt_return_srr
2362
2363
2364/**
2365 * Interrupt 0xf20 - Vector Unavailable Interrupt.
2366 * This is a synchronous interrupt in response to
2367 * executing a vector (or altivec) instruction with MSR[VEC]=0.
2368 * Similar to FP unavailable.
2369 */
2370INT_DEFINE_BEGIN(altivec_unavailable)
2371 IVEC=0xf20
2372#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2373 IKVM_REAL=1
2374#endif
2375INT_DEFINE_END(altivec_unavailable)
2376
2377EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
2378 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
2379EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
2380EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
2381 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
2382EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
2383EXC_COMMON_BEGIN(altivec_unavailable_common)
2384 GEN_COMMON altivec_unavailable
2385#ifdef CONFIG_ALTIVEC
2386BEGIN_FTR_SECTION
2387 beq 1f
2388#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2389 BEGIN_FTR_SECTION_NESTED(69)
2390 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in
2391 * a transaction), go do TM stuff
2392 */
2393 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
2394 bne- 2f
2395 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2396#endif
2397 bl load_up_altivec
2398 b fast_interrupt_return_srr
2399#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
24002: /* User process was in a transaction */
2401 addi r3,r1,STACK_FRAME_OVERHEAD
2402 bl altivec_unavailable_tm
2403 b interrupt_return_srr
2404#endif
24051:
2406END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2407#endif
2408 addi r3,r1,STACK_FRAME_OVERHEAD
2409 bl altivec_unavailable_exception
2410 b interrupt_return_srr
2411
2412
2413/**
2414 * Interrupt 0xf40 - VSX Unavailable Interrupt.
2415 * This is a synchronous interrupt in response to
2416 * executing a VSX instruction with MSR[VSX]=0.
2417 * Similar to FP unavailable.
2418 */
2419INT_DEFINE_BEGIN(vsx_unavailable)
2420 IVEC=0xf40
2421#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2422 IKVM_REAL=1
2423#endif
2424INT_DEFINE_END(vsx_unavailable)
2425
2426EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
2427 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
2428EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
2429EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
2430 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
2431EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
2432EXC_COMMON_BEGIN(vsx_unavailable_common)
2433 GEN_COMMON vsx_unavailable
2434#ifdef CONFIG_VSX
2435BEGIN_FTR_SECTION
2436 beq 1f
2437#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2438 BEGIN_FTR_SECTION_NESTED(69)
2439 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in
2440 * a transaction), go do TM stuff
2441 */
2442 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
2443 bne- 2f
2444 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2445#endif
2446 b load_up_vsx
2447#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
24482: /* User process was in a transaction */
2449 addi r3,r1,STACK_FRAME_OVERHEAD
2450 bl vsx_unavailable_tm
2451 b interrupt_return_srr
2452#endif
24531:
2454END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2455#endif
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457 bl vsx_unavailable_exception
2458 b interrupt_return_srr
2459
2460
2461/**
2462 * Interrupt 0xf60 - Facility Unavailable Interrupt.
2463 * This is a synchronous interrupt in response to
2464 * executing an instruction without access to the facility that can be
2465 * resolved by the OS (e.g., FSCR, MSR).
2466 * Similar to FP unavailable.
2467 */
2468INT_DEFINE_BEGIN(facility_unavailable)
2469 IVEC=0xf60
2470#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2471 IKVM_REAL=1
2472#endif
2473INT_DEFINE_END(facility_unavailable)
2474
2475EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2476 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2477EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2478EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2479 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2480EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2481EXC_COMMON_BEGIN(facility_unavailable_common)
2482 GEN_COMMON facility_unavailable
2483 addi r3,r1,STACK_FRAME_OVERHEAD
2484 bl facility_unavailable_exception
2485 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2486 b interrupt_return_srr
2487
2488
2489/**
2490 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2491 * This is a synchronous interrupt in response to
2492 * executing an instruction without access to the facility that can only
2493 * be resolved in HV mode (e.g., HFSCR).
2494 * Similar to FP unavailable.
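 *
 * (For example, an access to a facility disabled in the HFSCR is delivered
 * here, to the hypervisor, rather than to the OS at 0xf60.)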
2495 */
2496INT_DEFINE_BEGIN(h_facility_unavailable)
2497 IVEC=0xf80
2498 IHSRR=1
2499 IKVM_REAL=1
2500 IKVM_VIRT=1
2501INT_DEFINE_END(h_facility_unavailable)
2502
2503EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2504 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2505EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2506EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2507 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2508EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2509EXC_COMMON_BEGIN(h_facility_unavailable_common)
2510 GEN_COMMON h_facility_unavailable
2511 addi r3,r1,STACK_FRAME_OVERHEAD
2512 bl facility_unavailable_exception
2513 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2514 b interrupt_return_hsrr
2515
2516
2517EXC_REAL_NONE(0xfa0, 0x20)
2518EXC_VIRT_NONE(0x4fa0, 0x20)
2519EXC_REAL_NONE(0xfc0, 0x20)
2520EXC_VIRT_NONE(0x4fc0, 0x20)
2521EXC_REAL_NONE(0xfe0, 0x20)
2522EXC_VIRT_NONE(0x4fe0, 0x20)
2523
2524EXC_REAL_NONE(0x1000, 0x100)
2525EXC_VIRT_NONE(0x5000, 0x100)
2526EXC_REAL_NONE(0x1100, 0x100)
2527EXC_VIRT_NONE(0x5100, 0x100)
2528
2529#ifdef CONFIG_CBE_RAS
2530INT_DEFINE_BEGIN(cbe_system_error)
2531 IVEC=0x1200
2532 IHSRR=1
2533INT_DEFINE_END(cbe_system_error)
2534
2535EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2536 GEN_INT_ENTRY cbe_system_error, virt=0
2537EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2538EXC_VIRT_NONE(0x5200, 0x100)
2539EXC_COMMON_BEGIN(cbe_system_error_common)
2540 GEN_COMMON cbe_system_error
2541 addi r3,r1,STACK_FRAME_OVERHEAD
2542 bl cbe_system_error_exception
2543 b interrupt_return_hsrr
2544
2545#else /* CONFIG_CBE_RAS */
2546EXC_REAL_NONE(0x1200, 0x100)
2547EXC_VIRT_NONE(0x5200, 0x100)
2548#endif
2549
2550/**
2551 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
2552 * This was removed from the ISA before 2.01, which is the earliest
2553 * 64-bit Book3S ISA supported; however, the G5 / 970 implements this
2554 * interrupt as a non-architected feature, available through the support
2555 * processor interface.
2556 */
2557INT_DEFINE_BEGIN(instruction_breakpoint)
2558 IVEC=0x1300
2559#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2560 IKVM_REAL=1
2561#endif
2562INT_DEFINE_END(instruction_breakpoint)
2563
2564EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
2565 GEN_INT_ENTRY instruction_breakpoint, virt=0
2566EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
2567EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
2568 GEN_INT_ENTRY instruction_breakpoint, virt=1
2569EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
2570EXC_COMMON_BEGIN(instruction_breakpoint_common)
2571 GEN_COMMON instruction_breakpoint
2572 addi r3,r1,STACK_FRAME_OVERHEAD
2573 bl instruction_breakpoint_exception
2574 b interrupt_return_srr
2575
2576
2577EXC_REAL_NONE(0x1400, 0x100)
2578EXC_VIRT_NONE(0x5400, 0x100)
2579
2580/**
2581 * Interrupt 0x1500 - Soft Patch Interrupt
2582 *
2583 * Handling:
2584 * This is an implementation-specific interrupt which can be used for a
2585 * range of exceptions.
2586 *
2587 * This interrupt handler is unique in that it runs the denormal assist
2588 * code even for guests (and even in guest context) without going to KVM,
2589 * for speed. POWER9 does not raise denorm exceptions, so this special case
2590 * could be phased out in the future.
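 *
 * Handling sketch: denorm_assist below moves a copy of each FP/VSX
 * register to itself (fmr / XVCPSGNDP), then winds HSRR0 back by 4 so
 * the faulting instruction is re-executed, without ever reaching the
 * common handler.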
2591 */ 2592INT_DEFINE_BEGIN(denorm_exception) 2593 IVEC=0x1500 2594 IHSRR=1 2595 IBRANCH_TO_COMMON=0 2596 IKVM_REAL=1 2597INT_DEFINE_END(denorm_exception) 2598 2599EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2600 GEN_INT_ENTRY denorm_exception, virt=0 2601#ifdef CONFIG_PPC_DENORMALISATION 2602 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2603 bne+ denorm_assist 2604#endif 2605 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2606EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2607#ifdef CONFIG_PPC_DENORMALISATION 2608EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2609 GEN_INT_ENTRY denorm_exception, virt=1 2610 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2611 bne+ denorm_assist 2612 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2613EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2614#else 2615EXC_VIRT_NONE(0x5500, 0x100) 2616#endif 2617 2618#ifdef CONFIG_PPC_DENORMALISATION 2619TRAMP_REAL_BEGIN(denorm_assist) 2620BEGIN_FTR_SECTION 2621/* 2622 * To denormalise we need to move a copy of the register to itself. 2623 * For POWER6 do that here for all FP regs. 2624 */ 2625 mfmsr r10 2626 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2627 xori r10,r10,(MSR_FE0|MSR_FE1) 2628 mtmsrd r10 2629 sync 2630 2631 .Lreg=0 2632 .rept 32 2633 fmr .Lreg,.Lreg 2634 .Lreg=.Lreg+1 2635 .endr 2636 2637FTR_SECTION_ELSE 2638/* 2639 * To denormalise we need to move a copy of the register to itself. 2640 * For POWER7 do that here for the first 32 VSX registers only. 2641 */ 2642 mfmsr r10 2643 oris r10,r10,MSR_VSX@h 2644 mtmsrd r10 2645 sync 2646 2647 .Lreg=0 2648 .rept 32 2649 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2650 .Lreg=.Lreg+1 2651 .endr 2652 2653ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2654 2655BEGIN_FTR_SECTION 2656 b denorm_done 2657END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2658/* 2659 * To denormalise we need to move a copy of the register to itself. 2660 * For POWER8 we need to do that for all 64 VSX registers 2661 */ 2662 .Lreg=32 2663 .rept 32 2664 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2665 .Lreg=.Lreg+1 2666 .endr 2667 2668denorm_done: 2669 mfspr r11,SPRN_HSRR0 2670 subi r11,r11,4 2671 mtspr SPRN_HSRR0,r11 2672 mtcrf 0x80,r9 2673 ld r9,PACA_EXGEN+EX_R9(r13) 2674BEGIN_FTR_SECTION 2675 ld r10,PACA_EXGEN+EX_PPR(r13) 2676 mtspr SPRN_PPR,r10 2677END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2678BEGIN_FTR_SECTION 2679 ld r10,PACA_EXGEN+EX_CFAR(r13) 2680 mtspr SPRN_CFAR,r10 2681END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2682 li r10,0 2683 stb r10,PACAHSRR_VALID(r13) 2684 ld r10,PACA_EXGEN+EX_R10(r13) 2685 ld r11,PACA_EXGEN+EX_R11(r13) 2686 ld r12,PACA_EXGEN+EX_R12(r13) 2687 ld r13,PACA_EXGEN+EX_R13(r13) 2688 HRFI_TO_UNKNOWN 2689 b . 
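 /* The "b ." above prevents speculative execution past HRFI_TO_UNKNOWN, as at 0xc00 */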
2690#endif 2691 2692EXC_COMMON_BEGIN(denorm_exception_common) 2693 GEN_COMMON denorm_exception 2694 addi r3,r1,STACK_FRAME_OVERHEAD 2695 bl unknown_exception 2696 b interrupt_return_hsrr 2697 2698 2699#ifdef CONFIG_CBE_RAS 2700INT_DEFINE_BEGIN(cbe_maintenance) 2701 IVEC=0x1600 2702 IHSRR=1 2703INT_DEFINE_END(cbe_maintenance) 2704 2705EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2706 GEN_INT_ENTRY cbe_maintenance, virt=0 2707EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2708EXC_VIRT_NONE(0x5600, 0x100) 2709EXC_COMMON_BEGIN(cbe_maintenance_common) 2710 GEN_COMMON cbe_maintenance 2711 addi r3,r1,STACK_FRAME_OVERHEAD 2712 bl cbe_maintenance_exception 2713 b interrupt_return_hsrr 2714 2715#else /* CONFIG_CBE_RAS */ 2716EXC_REAL_NONE(0x1600, 0x100) 2717EXC_VIRT_NONE(0x5600, 0x100) 2718#endif 2719 2720 2721INT_DEFINE_BEGIN(altivec_assist) 2722 IVEC=0x1700 2723#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2724 IKVM_REAL=1 2725#endif 2726INT_DEFINE_END(altivec_assist) 2727 2728EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2729 GEN_INT_ENTRY altivec_assist, virt=0 2730EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2731EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2732 GEN_INT_ENTRY altivec_assist, virt=1 2733EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2734EXC_COMMON_BEGIN(altivec_assist_common) 2735 GEN_COMMON altivec_assist 2736 addi r3,r1,STACK_FRAME_OVERHEAD 2737#ifdef CONFIG_ALTIVEC 2738 bl altivec_assist_exception 2739 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2740#else 2741 bl unknown_exception 2742#endif 2743 b interrupt_return_srr 2744 2745 2746#ifdef CONFIG_CBE_RAS 2747INT_DEFINE_BEGIN(cbe_thermal) 2748 IVEC=0x1800 2749 IHSRR=1 2750INT_DEFINE_END(cbe_thermal) 2751 2752EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2753 GEN_INT_ENTRY cbe_thermal, virt=0 2754EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2755EXC_VIRT_NONE(0x5800, 0x100) 2756EXC_COMMON_BEGIN(cbe_thermal_common) 2757 GEN_COMMON cbe_thermal 2758 addi r3,r1,STACK_FRAME_OVERHEAD 2759 bl cbe_thermal_exception 2760 b interrupt_return_hsrr 2761 2762#else /* CONFIG_CBE_RAS */ 2763EXC_REAL_NONE(0x1800, 0x100) 2764EXC_VIRT_NONE(0x5800, 0x100) 2765#endif 2766 2767 2768#ifdef CONFIG_PPC_WATCHDOG 2769 2770INT_DEFINE_BEGIN(soft_nmi) 2771 IVEC=0x900 2772 ISTACK=0 2773 ICFAR=0 2774INT_DEFINE_END(soft_nmi) 2775 2776/* 2777 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2778 * stack is one that is usable by maskable interrupts so long as MSR_EE 2779 * remains off. It is used for recovery when something has corrupted the 2780 * normal kernel stack, for example. The "soft NMI" must not use the process 2781 * stack because we want irq disabled sections to avoid touching the stack 2782 * at all (other than PMU interrupts), so use the emergency stack for this, 2783 * and run it entirely with interrupts hard disabled. 2784 */ 2785EXC_COMMON_BEGIN(soft_nmi_common) 2786 mr r10,r1 2787 ld r1,PACAEMERGSP(r13) 2788 subi r1,r1,INT_FRAME_SIZE 2789 __GEN_COMMON_BODY soft_nmi 2790 2791 addi r3,r1,STACK_FRAME_OVERHEAD 2792 bl soft_nmi_interrupt 2793 2794 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2795 li r9,0 2796 mtmsrd r9,1 2797 2798 kuap_kernel_restore r9, r10 2799 2800 EXCEPTION_RESTORE_REGS hsrr=0 2801 RFI_TO_KERNEL 2802 2803#endif /* CONFIG_PPC_WATCHDOG */ 2804 2805/* 2806 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2807 * - If it was a decrementer interrupt, we bump the dec to max and return. 
2808 * - If it was a doorbell we return immediately since doorbells are edge 2809 * triggered and won't automatically refire. 2810 * - If it was a HMI we return immediately since we handled it in realmode 2811 * and it won't refire. 2812 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2813 * This is called with r10 containing the value to OR to the paca field. 2814 */ 2815.macro MASKED_INTERRUPT hsrr=0 2816 .if \hsrr 2817masked_Hinterrupt: 2818 .else 2819masked_interrupt: 2820 .endif 2821 stw r9,PACA_EXGEN+EX_CCR(r13) 2822#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG 2823 /* 2824 * Ensure there was no previous MUST_HARD_MASK interrupt or 2825 * HARD_DIS setting. If this does fire, the interrupt is still 2826 * masked and MSR[EE] will be cleared on return, so no need to 2827 * panic, but somebody probably enabled MSR[EE] under 2828 * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common 2829 * cause. 2830 */ 2831 lbz r9,PACAIRQHAPPENED(r13) 2832 andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) 28330: tdnei r9,0 2834 EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) 2835#endif 2836 lbz r9,PACAIRQHAPPENED(r13) 2837 or r9,r9,r10 2838 stb r9,PACAIRQHAPPENED(r13) 2839 2840 .if ! \hsrr 2841 cmpwi r10,PACA_IRQ_DEC 2842 bne 1f 2843 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2844 mtspr SPRN_DEC,r9 2845#ifdef CONFIG_PPC_WATCHDOG 2846 lwz r9,PACA_EXGEN+EX_CCR(r13) 2847 b soft_nmi_common 2848#else 2849 b 2f 2850#endif 2851 .endif 2852 28531: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2854 beq 2f 2855 xori r12,r12,MSR_EE /* clear MSR_EE */ 2856 .if \hsrr 2857 mtspr SPRN_HSRR1,r12 2858 .else 2859 mtspr SPRN_SRR1,r12 2860 .endif 2861 ori r9,r9,PACA_IRQ_HARD_DIS 2862 stb r9,PACAIRQHAPPENED(r13) 28632: /* done */ 2864 li r9,0 2865 .if \hsrr 2866 stb r9,PACAHSRR_VALID(r13) 2867 .else 2868 stb r9,PACASRR_VALID(r13) 2869 .endif 2870 2871 SEARCH_RESTART_TABLE 2872 cmpdi r12,0 2873 beq 3f 2874 .if \hsrr 2875 mtspr SPRN_HSRR0,r12 2876 .else 2877 mtspr SPRN_SRR0,r12 2878 .endif 28793: 2880 2881 ld r9,PACA_EXGEN+EX_CTR(r13) 2882 mtctr r9 2883 lwz r9,PACA_EXGEN+EX_CCR(r13) 2884 mtcrf 0x80,r9 2885 std r1,PACAR1(r13) 2886 ld r9,PACA_EXGEN+EX_R9(r13) 2887 ld r10,PACA_EXGEN+EX_R10(r13) 2888 ld r11,PACA_EXGEN+EX_R11(r13) 2889 ld r12,PACA_EXGEN+EX_R12(r13) 2890 ld r13,PACA_EXGEN+EX_R13(r13) 2891 /* May return to masked low address where r13 is not set up */ 2892 .if \hsrr 2893 HRFI_TO_KERNEL 2894 .else 2895 RFI_TO_KERNEL 2896 .endif 2897 b . 2898.endm 2899 2900TRAMP_REAL_BEGIN(stf_barrier_fallback) 2901 std r9,PACA_EXRFI+EX_R9(r13) 2902 std r10,PACA_EXRFI+EX_R10(r13) 2903 sync 2904 ld r9,PACA_EXRFI+EX_R9(r13) 2905 ld r10,PACA_EXRFI+EX_R10(r13) 2906 ori 31,31,0 2907 .rept 14 2908 b 1f 29091: 2910 .endr 2911 blr 2912 2913/* Clobbers r10, r11, ctr */ 2914.macro L1D_DISPLACEMENT_FLUSH 2915 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2916 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2917 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2918 mtctr r11 2919 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2920 2921 /* order ld/st prior to dcbt stop all streams with flushing */ 2922 sync 2923 2924 /* 2925 * The load addresses are at staggered offsets within cachelines, 2926 * which suits some pipelines better (on others it should not 2927 * hurt). 
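 *
 * As a worked example (assuming a hypothetical 64kB flush size; the real
 * value comes from PACA_L1D_FLUSH_SIZE), the srdi by (7 + 3) above gives
 * 64kB / 1024 = 64 iterations, each issuing 8 loads spaced 0x88 bytes
 * (one cacheline plus 8 bytes) apart.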
2928 */
29291:
2930 ld r11,(0x80 + 8)*0(r10)
2931 ld r11,(0x80 + 8)*1(r10)
2932 ld r11,(0x80 + 8)*2(r10)
2933 ld r11,(0x80 + 8)*3(r10)
2934 ld r11,(0x80 + 8)*4(r10)
2935 ld r11,(0x80 + 8)*5(r10)
2936 ld r11,(0x80 + 8)*6(r10)
2937 ld r11,(0x80 + 8)*7(r10)
2938 addi r10,r10,0x80*8
2939 bdnz 1b
2940.endm
2941
2942TRAMP_REAL_BEGIN(entry_flush_fallback)
2943 std r9,PACA_EXRFI+EX_R9(r13)
2944 std r10,PACA_EXRFI+EX_R10(r13)
2945 std r11,PACA_EXRFI+EX_R11(r13)
2946 mfctr r9
2947 L1D_DISPLACEMENT_FLUSH
2948 mtctr r9
2949 ld r9,PACA_EXRFI+EX_R9(r13)
2950 ld r10,PACA_EXRFI+EX_R10(r13)
2951 ld r11,PACA_EXRFI+EX_R11(r13)
2952 blr
2953
2954/*
2955 * The SCV entry flush happens with interrupts enabled, so it must disable
2956 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
2957 * r10 (containing LR) does not need to be preserved here because scv entry
2958 * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
2959 */
2960TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
2961 li r10,0
2962 mtmsrd r10,1
2963 lbz r10,PACAIRQHAPPENED(r13)
2964 ori r10,r10,PACA_IRQ_HARD_DIS
2965 stb r10,PACAIRQHAPPENED(r13)
2966 std r11,PACA_EXRFI+EX_R11(r13)
2967 L1D_DISPLACEMENT_FLUSH
2968 ld r11,PACA_EXRFI+EX_R11(r13)
2969 li r10,MSR_RI
2970 mtmsrd r10,1
2971 blr
2972
2973TRAMP_REAL_BEGIN(rfi_flush_fallback)
2974 SET_SCRATCH0(r13);
2975 GET_PACA(r13);
2976 std r1,PACA_EXRFI+EX_R12(r13)
2977 ld r1,PACAKSAVE(r13)
2978 std r9,PACA_EXRFI+EX_R9(r13)
2979 std r10,PACA_EXRFI+EX_R10(r13)
2980 std r11,PACA_EXRFI+EX_R11(r13)
2981 mfctr r9
2982 L1D_DISPLACEMENT_FLUSH
2983 mtctr r9
2984 ld r9,PACA_EXRFI+EX_R9(r13)
2985 ld r10,PACA_EXRFI+EX_R10(r13)
2986 ld r11,PACA_EXRFI+EX_R11(r13)
2987 ld r1,PACA_EXRFI+EX_R12(r13)
2988 GET_SCRATCH0(r13);
2989 rfid
2990
2991TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2992 SET_SCRATCH0(r13);
2993 GET_PACA(r13);
2994 std r1,PACA_EXRFI+EX_R12(r13)
2995 ld r1,PACAKSAVE(r13)
2996 std r9,PACA_EXRFI+EX_R9(r13)
2997 std r10,PACA_EXRFI+EX_R10(r13)
2998 std r11,PACA_EXRFI+EX_R11(r13)
2999 mfctr r9
3000 L1D_DISPLACEMENT_FLUSH
3001 mtctr r9
3002 ld r9,PACA_EXRFI+EX_R9(r13)
3003 ld r10,PACA_EXRFI+EX_R10(r13)
3004 ld r11,PACA_EXRFI+EX_R11(r13)
3005 ld r1,PACA_EXRFI+EX_R12(r13)
3006 GET_SCRATCH0(r13);
3007 hrfid
3008
3009TRAMP_REAL_BEGIN(rfscv_flush_fallback)
3010 /* system call volatile */
3011 mr r7,r13
3012 GET_PACA(r13);
3013 mr r8,r1
3014 ld r1,PACAKSAVE(r13)
3015 mfctr r9
3016 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
3017 ld r11,PACA_L1D_FLUSH_SIZE(r13)
3018 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
3019 mtctr r11
3020 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
3021
3022 /* order ld/st prior to dcbt stop all streams with flushing */
3023 sync
3024
3025 /*
3026 * The load addresses are at staggered offsets within cachelines,
3027 * which suits some pipelines better (on others it should not
3028 * hurt).
3029 */
30301:
3031 ld r11,(0x80 + 8)*0(r10)
3032 ld r11,(0x80 + 8)*1(r10)
3033 ld r11,(0x80 + 8)*2(r10)
3034 ld r11,(0x80 + 8)*3(r10)
3035 ld r11,(0x80 + 8)*4(r10)
3036 ld r11,(0x80 + 8)*5(r10)
3037 ld r11,(0x80 + 8)*6(r10)
3038 ld r11,(0x80 + 8)*7(r10)
3039 addi r10,r10,0x80*8
3040 bdnz 1b
3041
3042 mtctr r9
3043 li r9,0
3044 li r10,0
3045 li r11,0
3046 mr r1,r8
3047 mr r13,r7
3048 RFSCV
3049
3050USE_TEXT_SECTION()
3051
3052#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
3053kvm_interrupt:
3054 /*
3055 * The conditional branch in KVMTEST can't reach all the way;
3056 * make a stub.
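 *
 * (A conditional branch reaches only +/- 32kB; kvmppc_interrupt lives in
 * .text, beyond that range from the fixed-section KVMTEST sites.)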
3057 */ 3058 b kvmppc_interrupt 3059#endif 3060 3061_GLOBAL(do_uaccess_flush) 3062 UACCESS_FLUSH_FIXUP_SECTION 3063 nop 3064 nop 3065 nop 3066 blr 3067 L1D_DISPLACEMENT_FLUSH 3068 blr 3069_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) 3070EXPORT_SYMBOL(do_uaccess_flush) 3071 3072 3073MASKED_INTERRUPT 3074MASKED_INTERRUPT hsrr=1 3075 3076USE_FIXED_SECTION(virt_trampolines) 3077 /* 3078 * All code below __end_soft_masked is treated as soft-masked. If 3079 * any code runs here with MSR[EE]=1, it must then cope with pending 3080 * soft interrupt being raised (i.e., by ensuring it is replayed). 3081 * 3082 * The __end_interrupts marker must be past the out-of-line (OOL) 3083 * handlers, so that they are copied to real address 0x100 when running 3084 * a relocatable kernel. This ensures they can be reached from the short 3085 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch 3086 * directly, without using LOAD_HANDLER(). 3087 */ 3088 .align 7 3089 .globl __end_interrupts 3090__end_interrupts: 3091DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines) 3092 3093CLOSE_FIXED_SECTION(real_vectors); 3094CLOSE_FIXED_SECTION(real_trampolines); 3095CLOSE_FIXED_SECTION(virt_vectors); 3096CLOSE_FIXED_SECTION(virt_trampolines); 3097 3098USE_TEXT_SECTION() 3099 3100/* MSR[RI] should be clear because this uses SRR[01] */ 3101_GLOBAL(enable_machine_check) 3102 mflr r0 3103 bcl 20,31,$+4 31040: mflr r3 3105 addi r3,r3,(1f - 0b) 3106 mtspr SPRN_SRR0,r3 3107 mfmsr r3 3108 ori r3,r3,MSR_ME 3109 mtspr SPRN_SRR1,r3 3110 RFI_TO_KERNEL 31111: mtlr r0 3112 blr 3113 3114/* MSR[RI] should be clear because this uses SRR[01] */ 3115disable_machine_check: 3116 mflr r0 3117 bcl 20,31,$+4 31180: mflr r3 3119 addi r3,r3,(1f - 0b) 3120 mtspr SPRN_SRR0,r3 3121 mfmsr r3 3122 li r4,MSR_ME 3123 andc r3,r3,r4 3124 mtspr SPRN_SRR1,r3 3125 RFI_TO_KERNEL 31261: mtlr r0 3127 blr 3128