1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * This file contains the 64-bit "server" PowerPC variant 4 * of the low level exception handling including exception 5 * vectors, exception return, part of the slb and stab 6 * handling and other fixed offset specific things. 7 * 8 * This file is meant to be #included from head_64.S due to 9 * position dependent assembly. 10 * 11 * Most of this originates from head_64.S and thus has the same 12 * copyright history. 13 * 14 */ 15 16#include <asm/hw_irq.h> 17#include <asm/exception-64s.h> 18#include <asm/ptrace.h> 19#include <asm/cpuidle.h> 20#include <asm/head-64.h> 21#include <asm/feature-fixups.h> 22#include <asm/kup.h> 23 24/* 25 * Following are fixed section helper macros. 26 * 27 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors 28 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors 29 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these) 30 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use) 31 * EXC_COMMON - After switching to virtual, relocated mode. 32 */ 33 34#define EXC_REAL_BEGIN(name, start, size) \ 35 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 36 37#define EXC_REAL_END(name, start, size) \ 38 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 39 40#define EXC_VIRT_BEGIN(name, start, size) \ 41 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 42 43#define EXC_VIRT_END(name, start, size) \ 44 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 45 46#define EXC_COMMON_BEGIN(name) \ 47 USE_TEXT_SECTION(); \ 48 .balign IFETCH_ALIGN_BYTES; \ 49 .global name; \ 50 _ASM_NOKPROBE_SYMBOL(name); \ 51 DEFINE_FIXED_SYMBOL(name, text); \ 52name: 53 54#define TRAMP_REAL_BEGIN(name) \ 55 FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) 56 57#define TRAMP_VIRT_BEGIN(name) \ 58 FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) 59 60#define EXC_REAL_NONE(start, size) \ 61 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ 62 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) 63 64#define EXC_VIRT_NONE(start, size) \ 65 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ 66 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) 67 68/* 69 * We're short on space and time in the exception prolog, so we can't 70 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label. 71 * Instead we get the base of the kernel from paca->kernelbase and or in the low 72 * part of label. This requires that the label be within 64KB of kernelbase, and 73 * that kernelbase be 64K aligned. 74 */ 75#define LOAD_HANDLER(reg, label) \ 76 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 77 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label) 78 79#define __LOAD_HANDLER(reg, label, section) \ 80 ld reg,PACAKBASE(r13); \ 81 ori reg,reg,(ABS_ADDR(label, section))@l 82 83/* 84 * Branches from unrelocated code (e.g., interrupts) to labels outside 85 * head-y require >64K offsets. 
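 *
 * __LOAD_FAR_HANDLER below therefore also adds the upper 16 bits of the
 * absolute address with addis, so the target does not have to sit within
 * 64K of kernelbase. As an illustrative sketch only (the offset is made up,
 * not a real label), a handler at absolute offset 0x12340 would expand to
 * roughly:
 *
 *   ld    r10,PACAKBASE(r13)
 *   ori   r10,r10,0x2340
 *   addis r10,r10,0x1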
86 */ 87#define __LOAD_FAR_HANDLER(reg, label, section) \ 88 ld reg,PACAKBASE(r13); \ 89 ori reg,reg,(ABS_ADDR(label, section))@l; \ 90 addis reg,reg,(ABS_ADDR(label, section))@h 91 92/* 93 * Interrupt code generation macros 94 */ 95#define IVEC .L_IVEC_\name\() /* Interrupt vector address */ 96#define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */ 97#define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */ 98#define IAREA .L_IAREA_\name\() /* PACA save area */ 99#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */ 100#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */ 101#define ICFAR .L_ICFAR_\name\() /* Uses CFAR */ 102#define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */ 103#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */ 104#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */ 105#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */ 106#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */ 107#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */ 108#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */ 109#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name 110#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ 111#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ 112#define __ISTACK(name) .L_ISTACK_ ## name 113#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ 114#define IMSR_R12 .L_IMSR_R12_\name\() /* Assumes MSR saved to r12 */ 115 116#define INT_DEFINE_BEGIN(n) \ 117.macro int_define_ ## n name 118 119#define INT_DEFINE_END(n) \ 120.endm ; \ 121int_define_ ## n n ; \ 122do_define_int n 123 124.macro do_define_int name 125 .ifndef IVEC 126 .error "IVEC not defined" 127 .endif 128 .ifndef IHSRR 129 IHSRR=0 130 .endif 131 .ifndef IHSRR_IF_HVMODE 132 IHSRR_IF_HVMODE=0 133 .endif 134 .ifndef IAREA 135 IAREA=PACA_EXGEN 136 .endif 137 .ifndef IVIRT 138 IVIRT=1 139 .endif 140 .ifndef IISIDE 141 IISIDE=0 142 .endif 143 .ifndef ICFAR 144 ICFAR=1 145 .endif 146 .ifndef ICFAR_IF_HVMODE 147 ICFAR_IF_HVMODE=0 148 .endif 149 .ifndef IDAR 150 IDAR=0 151 .endif 152 .ifndef IDSISR 153 IDSISR=0 154 .endif 155 .ifndef IBRANCH_TO_COMMON 156 IBRANCH_TO_COMMON=1 157 .endif 158 .ifndef IREALMODE_COMMON 159 IREALMODE_COMMON=0 160 .else 161 .if ! IBRANCH_TO_COMMON 162 .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0" 163 .endif 164 .endif 165 .ifndef IMASK 166 IMASK=0 167 .endif 168 .ifndef IKVM_REAL 169 IKVM_REAL=0 170 .endif 171 .ifndef IKVM_VIRT 172 IKVM_VIRT=0 173 .endif 174 .ifndef ISTACK 175 ISTACK=1 176 .endif 177 .ifndef IKUAP 178 IKUAP=1 179 .endif 180 .ifndef IMSR_R12 181 IMSR_R12=0 182 .endif 183.endm 184 185/* 186 * All interrupts which set HSRR registers, as well as SRESET and MCE and 187 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken, 188 * so they all generally need to test whether they were taken in guest context. 189 * 190 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be 191 * taken with MSR[HV]=0. 192 * 193 * Interrupts which set SRR registers (with the above exceptions) do not 194 * elevate to MSR[HV]=1 mode, though most can be taken when running with 195 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do 196 * not need to test whether a guest is running because they get delivered to 197 * the guest directly, including nested HV KVM guests. 
198 * 199 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host 200 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the 201 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be 202 * delivered to the real-mode entry point, therefore such interrupts only test 203 * KVM in their real mode handlers, and only when PR KVM is possible. 204 * 205 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always 206 * delivered in real-mode when the MMU is in hash mode because the MMU 207 * registers are not set appropriately to translate host addresses. In nested 208 * radix mode these can be delivered in virt-mode as the host translations are 209 * used implicitly (see: effective LPID, effective PID). 210 */ 211 212/* 213 * If an interrupt is taken while a guest is running, it is immediately routed 214 * to KVM to handle. 215 */ 216 217.macro KVMTEST name handler 218#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 219 lbz r10,HSTATE_IN_GUEST(r13) 220 cmpwi r10,0 221 /* HSRR variants have the 0x2 bit added to their trap number */ 222 .if IHSRR_IF_HVMODE 223 BEGIN_FTR_SECTION 224 li r10,(IVEC + 0x2) 225 FTR_SECTION_ELSE 226 li r10,(IVEC) 227 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 228 .elseif IHSRR 229 li r10,(IVEC + 0x2) 230 .else 231 li r10,(IVEC) 232 .endif 233 bne \handler 234#endif 235.endm 236 237/* 238 * This is the BOOK3S interrupt entry code macro. 239 * 240 * This can result in one of several things happening: 241 * - Branch to the _common handler, relocated, in virtual mode. 242 * These are normal interrupts (synchronous and asynchronous) handled by 243 * the kernel. 244 * - Branch to KVM, relocated but real mode interrupts remain in real mode. 245 * These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by 246 * / intended for host or guest kernel, but KVM must always be involved 247 * because the machine state is set for guest execution. 248 * - Branch to the masked handler, unrelocated. 249 * These occur when maskable asynchronous interrupts are taken with the 250 * irq_soft_mask set. 251 * - Branch to an "early" handler in real mode but relocated. 252 * This is done if early=1. MCE and HMI use these to handle errors in real 253 * mode. 254 * - Fall through and continue executing in real, unrelocated mode. 255 * This is done if early=2. 
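 *
 * Where the target may be out of range of a relative branch (real-mode
 * common handlers, or virt-mode handlers on CONFIG_RELOCATABLE kernels),
 * GEN_BRANCH_TO_COMMON below uses the LOAD_HANDLER / mtctr / bctr sequence
 * rather than a plain branch.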
256 */ 257 258.macro GEN_BRANCH_TO_COMMON name, virt 259 .if IREALMODE_COMMON 260 LOAD_HANDLER(r10, \name\()_common) 261 mtctr r10 262 bctr 263 .else 264 .if \virt 265#ifndef CONFIG_RELOCATABLE 266 b \name\()_common_virt 267#else 268 LOAD_HANDLER(r10, \name\()_common_virt) 269 mtctr r10 270 bctr 271#endif 272 .else 273 LOAD_HANDLER(r10, \name\()_common_real) 274 mtctr r10 275 bctr 276 .endif 277 .endif 278.endm 279 280.macro GEN_INT_ENTRY name, virt, ool=0 281 SET_SCRATCH0(r13) /* save r13 */ 282 GET_PACA(r13) 283 std r9,IAREA+EX_R9(r13) /* save r9 */ 284BEGIN_FTR_SECTION 285 mfspr r9,SPRN_PPR 286END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 287 HMT_MEDIUM 288 std r10,IAREA+EX_R10(r13) /* save r10 */ 289 .if ICFAR 290BEGIN_FTR_SECTION 291 mfspr r10,SPRN_CFAR 292END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 293 .elseif ICFAR_IF_HVMODE 294BEGIN_FTR_SECTION 295 BEGIN_FTR_SECTION_NESTED(69) 296 mfspr r10,SPRN_CFAR 297 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) 298FTR_SECTION_ELSE 299 BEGIN_FTR_SECTION_NESTED(69) 300 li r10,0 301 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) 302ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 303 .endif 304 .if \ool 305 .if !\virt 306 b tramp_real_\name 307 .pushsection .text 308 TRAMP_REAL_BEGIN(tramp_real_\name) 309 .else 310 b tramp_virt_\name 311 .pushsection .text 312 TRAMP_VIRT_BEGIN(tramp_virt_\name) 313 .endif 314 .endif 315 316BEGIN_FTR_SECTION 317 std r9,IAREA+EX_PPR(r13) 318END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 319 .if ICFAR || ICFAR_IF_HVMODE 320BEGIN_FTR_SECTION 321 std r10,IAREA+EX_CFAR(r13) 322END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 323 .endif 324 INTERRUPT_TO_KERNEL 325 mfctr r10 326 std r10,IAREA+EX_CTR(r13) 327 mfcr r9 328 std r11,IAREA+EX_R11(r13) /* save r11 - r12 */ 329 std r12,IAREA+EX_R12(r13) 330 331 /* 332 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], 333 * because a d-side MCE will clobber those registers so is 334 * not recoverable if they are live. 335 */ 336 GET_SCRATCH0(r10) 337 std r10,IAREA+EX_R13(r13) 338 .if IDAR && !IISIDE 339 .if IHSRR 340 mfspr r10,SPRN_HDAR 341 .else 342 mfspr r10,SPRN_DAR 343 .endif 344 std r10,IAREA+EX_DAR(r13) 345 .endif 346 .if IDSISR && !IISIDE 347 .if IHSRR 348 mfspr r10,SPRN_HDSISR 349 .else 350 mfspr r10,SPRN_DSISR 351 .endif 352 stw r10,IAREA+EX_DSISR(r13) 353 .endif 354 355 .if IHSRR_IF_HVMODE 356 BEGIN_FTR_SECTION 357 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 358 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 359 FTR_SECTION_ELSE 360 mfspr r11,SPRN_SRR0 /* save SRR0 */ 361 mfspr r12,SPRN_SRR1 /* and SRR1 */ 362 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 363 .elseif IHSRR 364 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 365 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 366 .else 367 mfspr r11,SPRN_SRR0 /* save SRR0 */ 368 mfspr r12,SPRN_SRR1 /* and SRR1 */ 369 .endif 370 371 .if IBRANCH_TO_COMMON 372 GEN_BRANCH_TO_COMMON \name \virt 373 .endif 374 375 .if \ool 376 .popsection 377 .endif 378.endm 379 380/* 381 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt 382 * entry, except in the case of the real-mode handlers which require 383 * __GEN_REALMODE_COMMON_ENTRY. 384 * 385 * This switches to virtual mode and sets MSR[RI]. 
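 * (That is true for the HSRR flavours; for SRR-flavoured interrupts the
 * mtmsrd below deliberately leaves MSR[RI] clear, and RI is only set later
 * by the interrupt entry wrapper once the saved state could survive a
 * recursive SRR-using exception; see the system reset comments further
 * down.)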
386 */ 387.macro __GEN_COMMON_ENTRY name 388DEFINE_FIXED_SYMBOL(\name\()_common_real, text) 389\name\()_common_real: 390 .if IKVM_REAL 391 KVMTEST \name kvm_interrupt 392 .endif 393 394 ld r10,PACAKMSR(r13) /* get MSR value for kernel */ 395 /* MSR[RI] is clear iff using SRR regs */ 396 .if IHSRR_IF_HVMODE 397 BEGIN_FTR_SECTION 398 xori r10,r10,MSR_RI 399 END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) 400 .elseif ! IHSRR 401 xori r10,r10,MSR_RI 402 .endif 403 mtmsrd r10 404 405 .if IVIRT 406 .if IKVM_VIRT 407 b 1f /* skip the virt test coming from real */ 408 .endif 409 410 .balign IFETCH_ALIGN_BYTES 411DEFINE_FIXED_SYMBOL(\name\()_common_virt, text) 412\name\()_common_virt: 413 .if IKVM_VIRT 414 KVMTEST \name kvm_interrupt 4151: 416 .endif 417 .endif /* IVIRT */ 418.endm 419 420/* 421 * Don't switch to virt mode. Used for early MCE and HMI handlers that 422 * want to run in real mode. 423 */ 424.macro __GEN_REALMODE_COMMON_ENTRY name 425DEFINE_FIXED_SYMBOL(\name\()_common_real, text) 426\name\()_common_real: 427 .if IKVM_REAL 428 KVMTEST \name kvm_interrupt 429 .endif 430.endm 431 432.macro __GEN_COMMON_BODY name 433 .if IMASK 434 .if ! ISTACK 435 .error "No support for masked interrupt to use custom stack" 436 .endif 437 438 /* If coming from user, skip soft-mask tests. */ 439 andi. r10,r12,MSR_PR 440 bne 3f 441 442 /* 443 * Kernel code running below __end_soft_masked may be 444 * implicitly soft-masked if it is within the regions 445 * in the soft mask table. 446 */ 447 LOAD_HANDLER(r10, __end_soft_masked) 448 cmpld r11,r10 449 bge+ 1f 450 451 /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */ 452 mtctr r12 453 stw r9,PACA_EXGEN+EX_CCR(r13) 454 SEARCH_SOFT_MASK_TABLE 455 cmpdi r12,0 456 mfctr r12 /* Restore r12 to SRR1 */ 457 lwz r9,PACA_EXGEN+EX_CCR(r13) 458 beq 1f /* Not in soft-mask table */ 459 li r10,IMASK 460 b 2f /* In soft-mask table, always mask */ 461 462 /* Test the soft mask state against our interrupt's bit */ 4631: lbz r10,PACAIRQSOFTMASK(r13) 4642: andi. r10,r10,IMASK 465 /* Associate vector numbers with bits in paca->irq_happened */ 466 .if IVEC == 0x500 || IVEC == 0xea0 467 li r10,PACA_IRQ_EE 468 .elseif IVEC == 0x900 469 li r10,PACA_IRQ_DEC 470 .elseif IVEC == 0xa00 || IVEC == 0xe80 471 li r10,PACA_IRQ_DBELL 472 .elseif IVEC == 0xe60 473 li r10,PACA_IRQ_HMI 474 .elseif IVEC == 0xf00 475 li r10,PACA_IRQ_PMI 476 .else 477 .abort "Bad maskable vector" 478 .endif 479 480 .if IHSRR_IF_HVMODE 481 BEGIN_FTR_SECTION 482 bne masked_Hinterrupt 483 FTR_SECTION_ELSE 484 bne masked_interrupt 485 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 486 .elseif IHSRR 487 bne masked_Hinterrupt 488 .else 489 bne masked_interrupt 490 .endif 491 .endif 492 493 .if ISTACK 494 andi. 
r10,r12,MSR_PR /* See if coming from user */ 4953: mr r10,r1 /* Save r1 */ 496 subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ 497 beq- 100f 498 ld r1,PACAKSAVE(r13) /* kernel stack to use */ 499100: tdgei r1,-INT_FRAME_SIZE /* trap if r1 is in userspace */ 500 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0 501 .endif 502 503 std r9,_CCR(r1) /* save CR in stackframe */ 504 std r11,_NIP(r1) /* save SRR0 in stackframe */ 505 std r12,_MSR(r1) /* save SRR1 in stackframe */ 506 std r10,0(r1) /* make stack chain pointer */ 507 std r0,GPR0(r1) /* save r0 in stackframe */ 508 std r10,GPR1(r1) /* save r1 in stackframe */ 509 SANITIZE_GPR(0) 510 511 /* Mark our [H]SRRs valid for return */ 512 li r10,1 513 .if IHSRR_IF_HVMODE 514 BEGIN_FTR_SECTION 515 stb r10,PACAHSRR_VALID(r13) 516 FTR_SECTION_ELSE 517 stb r10,PACASRR_VALID(r13) 518 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 519 .elseif IHSRR 520 stb r10,PACAHSRR_VALID(r13) 521 .else 522 stb r10,PACASRR_VALID(r13) 523 .endif 524 525 .if ISTACK 526 .if IKUAP 527 kuap_save_amr_and_lock r9, r10, cr1, cr0 528 .endif 529 beq 101f /* if from kernel mode */ 530BEGIN_FTR_SECTION 531 ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ 532 std r9,_PPR(r1) 533END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 534101: 535 .else 536 .if IKUAP 537 kuap_save_amr_and_lock r9, r10, cr1 538 .endif 539 .endif 540 541 /* Save original regs values from save area to stack frame. */ 542 ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */ 543 ld r10,IAREA+EX_R10(r13) 544 std r9,GPR9(r1) 545 std r10,GPR10(r1) 546 ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */ 547 ld r10,IAREA+EX_R12(r13) 548 ld r11,IAREA+EX_R13(r13) 549 std r9,GPR11(r1) 550 std r10,GPR12(r1) 551 std r11,GPR13(r1) 552 .if !IMSR_R12 553 SANITIZE_GPRS(9, 12) 554 .else 555 SANITIZE_GPRS(9, 11) 556 .endif 557 558 SAVE_NVGPRS(r1) 559 SANITIZE_NVGPRS() 560 561 .if IDAR 562 .if IISIDE 563 ld r10,_NIP(r1) 564 .else 565 ld r10,IAREA+EX_DAR(r13) 566 .endif 567 std r10,_DAR(r1) 568 .endif 569 570 .if IDSISR 571 .if IISIDE 572 ld r10,_MSR(r1) 573 lis r11,DSISR_SRR1_MATCH_64S@h 574 and r10,r10,r11 575 .else 576 lwz r10,IAREA+EX_DSISR(r13) 577 .endif 578 std r10,_DSISR(r1) 579 .endif 580 581BEGIN_FTR_SECTION 582 .if ICFAR || ICFAR_IF_HVMODE 583 ld r10,IAREA+EX_CFAR(r13) 584 .else 585 li r10,0 586 .endif 587 std r10,ORIG_GPR3(r1) 588END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 589 ld r10,IAREA+EX_CTR(r13) 590 std r10,_CTR(r1) 591 SAVE_GPRS(2, 8, r1) /* save r2 - r8 in stackframe */ 592 SANITIZE_GPRS(2, 8) 593 mflr r9 /* Get LR, later save to stack */ 594 LOAD_PACA_TOC() /* get kernel TOC into r2 */ 595 std r9,_LINK(r1) 596 lbz r10,PACAIRQSOFTMASK(r13) 597 mfspr r11,SPRN_XER /* save XER in stackframe */ 598 std r10,SOFTE(r1) 599 std r11,_XER(r1) 600 li r9,IVEC 601 std r9,_TRAP(r1) /* set trap number */ 602 li r10,0 603 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) 604 std r10,RESULT(r1) /* clear regs->result */ 605 std r11,STACK_INT_FRAME_MARKER(r1) /* mark the frame */ 606.endm 607 608/* 609 * On entry r13 points to the paca, r9-r13 are saved in the paca, 610 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and 611 * SRR1, and relocation is on. 612 * 613 * If stack=0, then the stack is already set in r1, and r1 is saved in r10. 614 * PPR save and CPU accounting is not done for the !stack case (XXX why not?) 
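 *
 * Most handlers use the combined GEN_COMMON macro below; system reset and
 * program check invoke __GEN_COMMON_ENTRY and __GEN_COMMON_BODY separately
 * so they can switch to a special stack in between, and the early machine
 * check handler does the same with the __GEN_REALMODE_COMMON_ENTRY variant.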
615 */ 616.macro GEN_COMMON name 617 __GEN_COMMON_ENTRY \name 618 __GEN_COMMON_BODY \name 619.endm 620 621.macro SEARCH_RESTART_TABLE 622#ifdef CONFIG_RELOCATABLE 623 mr r12,r2 624 LOAD_PACA_TOC() 625 LOAD_REG_ADDR(r9, __start___restart_table) 626 LOAD_REG_ADDR(r10, __stop___restart_table) 627 mr r2,r12 628#else 629 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table) 630 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table) 631#endif 632300: 633 cmpd r9,r10 634 beq 302f 635 ld r12,0(r9) 636 cmpld r11,r12 637 blt 301f 638 ld r12,8(r9) 639 cmpld r11,r12 640 bge 301f 641 ld r12,16(r9) 642 b 303f 643301: 644 addi r9,r9,24 645 b 300b 646302: 647 li r12,0 648303: 649.endm 650 651.macro SEARCH_SOFT_MASK_TABLE 652#ifdef CONFIG_RELOCATABLE 653 mr r12,r2 654 LOAD_PACA_TOC() 655 LOAD_REG_ADDR(r9, __start___soft_mask_table) 656 LOAD_REG_ADDR(r10, __stop___soft_mask_table) 657 mr r2,r12 658#else 659 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table) 660 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table) 661#endif 662300: 663 cmpd r9,r10 664 beq 302f 665 ld r12,0(r9) 666 cmpld r11,r12 667 blt 301f 668 ld r12,8(r9) 669 cmpld r11,r12 670 bge 301f 671 li r12,1 672 b 303f 673301: 674 addi r9,r9,16 675 b 300b 676302: 677 li r12,0 678303: 679.endm 680 681/* 682 * Restore all registers including H/SRR0/1 saved in a stack frame of a 683 * standard exception. 684 */ 685.macro EXCEPTION_RESTORE_REGS hsrr=0 686 /* Move original SRR0 and SRR1 into the respective regs */ 687 ld r9,_MSR(r1) 688 li r10,0 689 .if \hsrr 690 mtspr SPRN_HSRR1,r9 691 stb r10,PACAHSRR_VALID(r13) 692 .else 693 mtspr SPRN_SRR1,r9 694 stb r10,PACASRR_VALID(r13) 695 .endif 696 ld r9,_NIP(r1) 697 .if \hsrr 698 mtspr SPRN_HSRR0,r9 699 .else 700 mtspr SPRN_SRR0,r9 701 .endif 702 ld r9,_CTR(r1) 703 mtctr r9 704 ld r9,_XER(r1) 705 mtxer r9 706 ld r9,_LINK(r1) 707 mtlr r9 708 ld r9,_CCR(r1) 709 mtcr r9 710 SANITIZE_RESTORE_NVGPRS() 711 REST_GPRS(2, 13, r1) 712 REST_GPR(0, r1) 713 /* restore original r1. */ 714 ld r1,GPR1(r1) 715.endm 716 717/* 718 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot. 719 * 720 * There's a short window during boot where although the kernel is running 721 * little endian, any exceptions will cause the CPU to switch back to big 722 * endian. For example a WARN() boils down to a trap instruction, which will 723 * cause a program check, and we end up here but with the CPU in big endian 724 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY 725 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with 726 * a ~3GB displacement from r3. The content of r3 is random, so that is a load 727 * from some random location, and depending on the system can easily lead to a 728 * checkstop, or an infinitely recursive page fault. 729 * 730 * So to handle that case we have a trampoline here that can detect we are in 731 * the wrong endian and flip us back to the correct endian. We can't flip 732 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1 733 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for 734 * the paca. SPRG3 is user readable, but this trampoline is only active very 735 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before 736 * userspace starts. 737 */ 738.macro EARLY_BOOT_FIXUP 739BEGIN_FTR_SECTION 740#ifdef CONFIG_CPU_LITTLE_ENDIAN 741 tdi 0,0,0x48 // Trap never, or in reverse endian: b . 
+ 8 742 b 2f // Skip trampoline if endian is correct 743 .long 0xa643707d // mtsprg 0, r11 Backup r11 744 .long 0xa6027a7d // mfsrr0 r11 745 .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 746 .long 0xa6027b7d // mfsrr1 r11 747 .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 748 .long 0xa600607d // mfmsr r11 749 .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] 750 .long 0xa6037b7d // mtsrr1 r11 751 /* 752 * This is 'li r11,1f' where 1f is the absolute address of that 753 * label, byteswapped into the SI field of the instruction. 754 */ 755 .long 0x00006039 | \ 756 ((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \ 757 ((ABS_ADDR(1f, real_vectors) & 0xff00) << 8) 758 .long 0xa6037a7d // mtsrr0 r11 759 .long 0x2400004c // rfid 7601: 761 mfsprg r11, 3 762 mtsrr1 r11 // Restore SRR1 763 mfsprg r11, 2 764 mtsrr0 r11 // Restore SRR0 765 mfsprg r11, 0 // Restore r11 7662: 767#endif 768 /* 769 * program check could hit at any time, and pseries can not block 770 * MSR[ME] in early boot. So check if there is anything useful in r13 771 * yet, and spin forever if not. 772 */ 773 mtsprg 0, r11 774 mfcr r11 775 cmpdi r13, 0 776 beq . 777 mtcr r11 778 mfsprg r11, 0 779END_FTR_SECTION(0, 1) // nop out after boot 780.endm 781 782/* 783 * There are a few constraints to be concerned with. 784 * - Real mode exceptions code/data must be located at their physical location. 785 * - Virtual mode exceptions must be mapped at their 0xc000... location. 786 * - Fixed location code must not call directly beyond the __end_interrupts 787 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence 788 * must be used. 789 * - LOAD_HANDLER targets must be within first 64K of physical 0 / 790 * virtual 0xc00... 791 * - Conditional branch targets must be within +/-32K of caller. 792 * 793 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and 794 * therefore don't have to run in physically located code or rfid to 795 * virtual mode kernel code. However on relocatable kernels they do have 796 * to branch to KERNELBASE offset because the rest of the kernel (outside 797 * the exception vectors) may be located elsewhere. 798 * 799 * Virtual exceptions correspond with physical, except their entry points 800 * are offset by 0xc000000000000000 and also tend to get an added 0x4000 801 * offset applied. Virtual exceptions are enabled with the Alternate 802 * Interrupt Location (AIL) bit set in the LPCR. However this does not 803 * guarantee they will be delivered virtually. Some conditions (see the ISA) 804 * cause exceptions to be delivered in real mode. 805 * 806 * The scv instructions are a special case. They get a 0x3000 offset applied. 807 * scv exceptions have unique reentrancy properties, see below. 808 * 809 * It's impossible to receive interrupts below 0x300 via AIL. 810 * 811 * KVM: None of the virtual exceptions are from the guest. Anything that 812 * escalated to HV=1 from HV=0 is delivered via real mode handlers. 813 * 814 * 815 * We layout physical memory as follows: 816 * 0x0000 - 0x00ff : Secondary processor spin code 817 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors 818 * 0x1900 - 0x2fff : Real mode trampolines 819 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors 820 * 0x5900 - 0x6fff : Relon mode trampolines 821 * 0x7000 - 0x7fff : FWNMI data area 822 * 0x8000 - .... : Common interrupt handlers, remaining early 823 * setup code, rest of kernel. 
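 *
 * These ranges correspond to the OPEN_FIXED_SECTION directives below:
 * real_vectors (0x0100-0x1900), real_trampolines (0x1900-0x3000),
 * virt_vectors (0x3000-0x5900) and virt_trampolines (0x5900-0x7000).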
824 * 825 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space 826 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE 827 * vectors there. 828 */ 829OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900) 830OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000) 831OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900) 832OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000) 833 834#ifdef CONFIG_PPC_POWERNV 835 .globl start_real_trampolines 836 .globl end_real_trampolines 837 .globl start_virt_trampolines 838 .globl end_virt_trampolines 839#endif 840 841#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 842/* 843 * Data area reserved for FWNMI option. 844 * This address (0x7000) is fixed by the RPA. 845 * pseries and powernv need to keep the whole page from 846 * 0x7000 to 0x8000 free for use by the firmware 847 */ 848ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000) 849OPEN_TEXT_SECTION(0x8000) 850#else 851OPEN_TEXT_SECTION(0x7000) 852#endif 853 854USE_FIXED_SECTION(real_vectors) 855 856/* 857 * This is the start of the interrupt handlers for pSeries 858 * This code runs with relocation off. 859 * Code from here to __end_interrupts gets copied down to real 860 * address 0x100 when we are running a relocatable kernel. 861 * Therefore any relative branches in this section must only 862 * branch to labels in this section. 863 */ 864 .globl __start_interrupts 865__start_interrupts: 866 867/** 868 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall). 869 * This is a synchronous interrupt invoked with the "scv" instruction. The 870 * system call does not alter the HV bit, so it is directed to the OS. 871 * 872 * Handling: 873 * scv instructions enter the kernel without changing EE, RI, ME, or HV. 874 * In particular, this means we can take a maskable interrupt at any point 875 * in the scv handler, which is unlike any other interrupt. This is solved 876 * by treating the instruction addresses in the handler as being soft-masked, 877 * by adding a SOFT_MASK_TABLE entry for them. 878 * 879 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and 880 * ensure scv is never executed with relocation off, which means AIL-0 881 * should never happen. 882 * 883 * Before leaving the following inside-__end_soft_masked text, at least one of the 884 * following must be true: 885 * - MSR[PR]=1 (i.e., return to userspace) 886 * - MSR_EE|MSR_RI is clear (no reentrant exceptions) 887 * - Standard kernel environment is set up (stack, paca, etc) 888 * 889 * KVM: 890 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM 891 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
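 *
 * Each 32-byte scv vector below saves r13 into r9, loads the paca pointer,
 * saves LR and CTR in r11/r12, marks all interrupts soft-disabled in
 * PACAIRQSOFTMASK, and branches to the common handler (via a trampoline on
 * CONFIG_RELOCATABLE kernels).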
892 * 893 * Call convention: 894 * 895 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst 896 */ 897EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000) 898 /* SCV 0 */ 899 mr r9,r13 900 GET_PACA(r13) 901 mflr r11 902 mfctr r12 903 li r10,IRQS_ALL_DISABLED 904 stb r10,PACAIRQSOFTMASK(r13) 905#ifdef CONFIG_RELOCATABLE 906 b system_call_vectored_tramp 907#else 908 b system_call_vectored_common 909#endif 910 nop 911 912 /* SCV 1 - 127 */ 913 .rept 127 914 mr r9,r13 915 GET_PACA(r13) 916 mflr r11 917 mfctr r12 918 li r10,IRQS_ALL_DISABLED 919 stb r10,PACAIRQSOFTMASK(r13) 920 li r0,-1 /* cause failure */ 921#ifdef CONFIG_RELOCATABLE 922 b system_call_vectored_sigill_tramp 923#else 924 b system_call_vectored_sigill 925#endif 926 .endr 927EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000) 928 929// Treat scv vectors as soft-masked, see comment above. 930// Use absolute values rather than labels here, so they don't get relocated, 931// because this code runs unrelocated. 932SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000) 933 934#ifdef CONFIG_RELOCATABLE 935TRAMP_VIRT_BEGIN(system_call_vectored_tramp) 936 __LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines) 937 mtctr r10 938 bctr 939 940TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp) 941 __LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines) 942 mtctr r10 943 bctr 944#endif 945 946 947/* No virt vectors corresponding with 0x0..0x100 */ 948EXC_VIRT_NONE(0x4000, 0x100) 949 950 951/** 952 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI). 953 * This is a non-maskable, asynchronous interrupt always taken in real-mode. 954 * It is caused by: 955 * - Wake from power-saving state, on powernv. 956 * - An NMI from another CPU, triggered by firmware or hypercall. 957 * - As crash/debug signal injected from BMC, firmware or hypervisor. 958 * 959 * Handling: 960 * Power-save wakeup is the only performance critical path, so this is 961 * determined as quickly as possible first. In this case volatile registers 962 * can be discarded and SPRs like CFAR don't need to be read. 963 * 964 * If not a powersave wakeup, then it's run as a regular interrupt; however, 965 * it uses its own stack and PACA save area to preserve the regular kernel 966 * environment for debugging. 967 * 968 * This interrupt is not maskable, so triggering it when MSR[RI] is clear, 969 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely 970 * correct to switch to virtual mode to run the regular interrupt handler 971 * because it might be interrupted when the MMU is in a bad state (e.g., SLB 972 * is clear). 973 * 974 * FWNMI: 975 * PAPR specifies a "fwnmi" facility which sends the sreset to a different 976 * entry point with a different register set up. Some hypervisors will 977 * send the sreset to 0x100 in the guest if it is not fwnmi capable. 978 * 979 * KVM: 980 * Unlike most SRR interrupts, this may be taken by the host while executing 981 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest 982 * mode and then raise the sreset. 983 */ 984INT_DEFINE_BEGIN(system_reset) 985 IVEC=0x100 986 IAREA=PACA_EXNMI 987 IVIRT=0 /* no virt entry point */ 988 ISTACK=0 989 IKVM_REAL=1 990INT_DEFINE_END(system_reset) 991 992EXC_REAL_BEGIN(system_reset, 0x100, 0x100) 993#ifdef CONFIG_PPC_P7_NAP 994 /* 995 * If running native on arch 2.06 or later, check if we are waking up 996 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1 997 * bits 46:47.
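 * (The record-form rlwinm. below extracts exactly those two bits into cr0.)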
A non-0 value indicates that we are coming from a power 998 * saving state. The idle wakeup handler initially runs in real mode, 999 * but we branch to the 0xc000... address so we can turn on relocation 1000 * with mtmsrd later, after SPRs are restored. 1001 * 1002 * Careful to minimise cost for the fast path (idle wakeup) while 1003 * also avoiding clobbering CFAR for the debug path (non-idle). 1004 * 1005 * For the idle wake case volatile registers can be clobbered, which 1006 * is why we use those initially. If it turns out to not be an idle 1007 * wake, carefully put everything back the way it was, so we can use 1008 * common exception macros to handle it. 1009 */ 1010BEGIN_FTR_SECTION 1011 SET_SCRATCH0(r13) 1012 GET_PACA(r13) 1013 std r3,PACA_EXNMI+0*8(r13) 1014 std r4,PACA_EXNMI+1*8(r13) 1015 std r5,PACA_EXNMI+2*8(r13) 1016 mfspr r3,SPRN_SRR1 1017 mfocrf r4,0x80 1018 rlwinm. r5,r3,47-31,30,31 1019 bne+ system_reset_idle_wake 1020 /* Not powersave wakeup. Restore regs for regular interrupt handler. */ 1021 mtocrf 0x80,r4 1022 ld r3,PACA_EXNMI+0*8(r13) 1023 ld r4,PACA_EXNMI+1*8(r13) 1024 ld r5,PACA_EXNMI+2*8(r13) 1025 GET_SCRATCH0(r13) 1026END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1027#endif 1028 1029 GEN_INT_ENTRY system_reset, virt=0 1030 /* 1031 * In theory, we should not enable relocation here if it was disabled 1032 * in SRR1, because the MMU may not be configured to support it (e.g., 1033 * SLB may have been cleared). In practice, there should only be a few 1034 * small windows where that's the case, and sreset is considered to 1035 * be dangerous anyway. 1036 */ 1037EXC_REAL_END(system_reset, 0x100, 0x100) 1038EXC_VIRT_NONE(0x4100, 0x100) 1039 1040#ifdef CONFIG_PPC_P7_NAP 1041TRAMP_REAL_BEGIN(system_reset_idle_wake) 1042 /* We are waking up from idle, so may clobber any volatile register */ 1043 cmpwi cr1,r5,2 1044 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 1045 __LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines) 1046 mtctr r12 1047 bctr 1048#endif 1049 1050#ifdef CONFIG_PPC_PSERIES 1051/* 1052 * Vectors for the FWNMI option. Share common code. 1053 */ 1054TRAMP_REAL_BEGIN(system_reset_fwnmi) 1055 GEN_INT_ENTRY system_reset, virt=0 1056 1057#endif /* CONFIG_PPC_PSERIES */ 1058 1059EXC_COMMON_BEGIN(system_reset_common) 1060 __GEN_COMMON_ENTRY system_reset 1061 /* 1062 * Increment paca->in_nmi. When the interrupt entry wrapper later 1063 * enable MSR_RI, then SLB or MCE will be able to recover, but a nested 1064 * NMI will notice in_nmi and not recover because of the use of the NMI 1065 * stack. in_nmi reentrancy is tested in system_reset_exception. 1066 */ 1067 lhz r10,PACA_IN_NMI(r13) 1068 addi r10,r10,1 1069 sth r10,PACA_IN_NMI(r13) 1070 1071 mr r10,r1 1072 ld r1,PACA_NMI_EMERG_SP(r13) 1073 subi r1,r1,INT_FRAME_SIZE 1074 __GEN_COMMON_BODY system_reset 1075 1076 addi r3,r1,STACK_INT_FRAME_REGS 1077 bl system_reset_exception 1078 1079 /* Clear MSR_RI before setting SRR0 and SRR1. */ 1080 li r9,0 1081 mtmsrd r9,1 1082 1083 /* 1084 * MSR_RI is clear, now we can decrement paca->in_nmi. 1085 */ 1086 lhz r10,PACA_IN_NMI(r13) 1087 subi r10,r10,1 1088 sth r10,PACA_IN_NMI(r13) 1089 1090 kuap_kernel_restore r9, r10 1091 EXCEPTION_RESTORE_REGS 1092 RFI_TO_USER_OR_KERNEL 1093 1094 1095/** 1096 * Interrupt 0x200 - Machine Check Interrupt (MCE). 1097 * This is a non-maskable interrupt always taken in real-mode. 
It can be 1098 * synchronous or asynchronous, caused by hardware or software, and it may be 1099 * taken in a power-saving state. 1100 * 1101 * Handling: 1102 * Similarly to system reset, this uses its own stack and PACA save area, 1103 * the difference is re-entrancy is allowed on the machine check stack. 1104 * 1105 * machine_check_early is run in real mode, and carefully decodes the 1106 * machine check and tries to handle it (e.g., flush the SLB if there was an 1107 * error detected there), determines if it was recoverable and logs the 1108 * event. 1109 * 1110 * This early code does not "reconcile" irq soft-mask state like SRESET or 1111 * regular interrupts do, so irqs_disabled() among other things may not work 1112 * properly (irq disable/enable already doesn't work because irq tracing can 1113 * not work in real mode). 1114 * 1115 * Then, depending on the execution context when the interrupt is taken, there 1116 * are 3 main actions: 1117 * - Executing in kernel mode. The event is queued with irq_work, which means 1118 * it is handled when it is next safe to do so (i.e., the kernel has enabled 1119 * interrupts), which could be immediately when the interrupt returns. This 1120 * avoids nasty issues like switching to virtual mode when the MMU is in a 1121 * bad state, or when executing OPAL code. (SRESET is exposed to such issues, 1122 * but it has different priorities). Check to see if the CPU was in power 1123 * save, and return via the wake up code if it was. 1124 * 1125 * - Executing in user mode. machine_check_exception is run like a normal 1126 * interrupt handler, which processes the data generated by the early handler. 1127 * 1128 * - Executing in guest mode. The interrupt is run with its KVM test, and 1129 * branches to KVM to deal with. KVM may queue the event for the host 1130 * to report later. 1131 * 1132 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear, 1133 * or SCRATCH0 is in use, it may cause a crash. 1134 * 1135 * KVM: 1136 * See SRESET. 1137 */ 1138INT_DEFINE_BEGIN(machine_check_early) 1139 IVEC=0x200 1140 IAREA=PACA_EXMC 1141 IVIRT=0 /* no virt entry point */ 1142 IREALMODE_COMMON=1 1143 ISTACK=0 1144 IDAR=1 1145 IDSISR=1 1146 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 1147INT_DEFINE_END(machine_check_early) 1148 1149INT_DEFINE_BEGIN(machine_check) 1150 IVEC=0x200 1151 IAREA=PACA_EXMC 1152 IVIRT=0 /* no virt entry point */ 1153 IDAR=1 1154 IDSISR=1 1155 IKVM_REAL=1 1156INT_DEFINE_END(machine_check) 1157 1158EXC_REAL_BEGIN(machine_check, 0x200, 0x100) 1159 EARLY_BOOT_FIXUP 1160 GEN_INT_ENTRY machine_check_early, virt=0 1161EXC_REAL_END(machine_check, 0x200, 0x100) 1162EXC_VIRT_NONE(0x4200, 0x100) 1163 1164#ifdef CONFIG_PPC_PSERIES 1165TRAMP_REAL_BEGIN(machine_check_fwnmi) 1166 /* See comment at machine_check exception, don't turn on RI */ 1167 GEN_INT_ENTRY machine_check_early, virt=0 1168#endif 1169 1170#define MACHINE_CHECK_HANDLER_WINDUP \ 1171 /* Clear MSR_RI before setting SRR0 and SRR1. */\ 1172 li r9,0; \ 1173 mtmsrd r9,1; /* Clear MSR_RI */ \ 1174 /* Decrement paca->in_mce now RI is clear. */ \ 1175 lhz r12,PACA_IN_MCE(r13); \ 1176 subi r12,r12,1; \ 1177 sth r12,PACA_IN_MCE(r13); \ 1178 EXCEPTION_RESTORE_REGS 1179 1180EXC_COMMON_BEGIN(machine_check_early_common) 1181 __GEN_REALMODE_COMMON_ENTRY machine_check_early 1182 1183 /* 1184 * Switch to mc_emergency stack and handle re-entrancy (we limit 1185 * the nested MCE upto level 4 to avoid stack overflow). 
1186 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 1187 * 1188 * We use paca->in_mce to check whether this is the first entry or 1189 * nested machine check. We increment paca->in_mce to track nested 1190 * machine checks. 1191 * 1192 * If this is the first entry then set stack pointer to 1193 * paca->mc_emergency_sp, otherwise r1 is already pointing to 1194 * stack frame on mc_emergency stack. 1195 * 1196 * NOTE: We are here with MSR_ME=0 (off), which means we risk a 1197 * checkstop if we get another machine check exception before we do 1198 * rfid with MSR_ME=1. 1199 * 1200 * This interrupt can wake directly from idle. If that is the case, 1201 * the machine check is handled then the idle wakeup code is called 1202 * to restore state. 1203 */ 1204 lhz r10,PACA_IN_MCE(r13) 1205 cmpwi r10,0 /* Are we in nested machine check */ 1206 cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */ 1207 addi r10,r10,1 /* increment paca->in_mce */ 1208 sth r10,PACA_IN_MCE(r13) 1209 1210 mr r10,r1 /* Save r1 */ 1211 bne 1f 1212 /* First machine check entry */ 1213 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ 12141: /* Limit nested MCE to level 4 to avoid stack overflow */ 1215 bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */ 1216 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1217 1218 __GEN_COMMON_BODY machine_check_early 1219 1220BEGIN_FTR_SECTION 1221 bl enable_machine_check 1222END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1223 addi r3,r1,STACK_INT_FRAME_REGS 1224BEGIN_FTR_SECTION 1225 bl machine_check_early_boot 1226END_FTR_SECTION(0, 1) // nop out after boot 1227 bl machine_check_early 1228 std r3,RESULT(r1) /* Save result */ 1229 ld r12,_MSR(r1) 1230 1231#ifdef CONFIG_PPC_P7_NAP 1232 /* 1233 * Check if thread was in power saving mode. We come here when any 1234 * of the following is true: 1235 * a. thread wasn't in power saving mode 1236 * b. thread was in power saving mode with no state loss, 1237 * supervisor state loss or hypervisor state loss. 1238 * 1239 * Go back to nap/sleep/winkle mode again if (b) is true. 1240 */ 1241BEGIN_FTR_SECTION 1242 rlwinm. r11,r12,47-31,30,31 1243 bne machine_check_idle_common 1244END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1245#endif 1246 1247#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1248 /* 1249 * Check if we are coming from guest. If yes, then run the normal 1250 * exception handler which will take the 1251 * machine_check_kvm->kvm_interrupt branch to deliver the MC event 1252 * to guest. 1253 */ 1254 lbz r11,HSTATE_IN_GUEST(r13) 1255 cmpwi r11,0 /* Check if coming from guest */ 1256 bne mce_deliver /* continue if we are. */ 1257#endif 1258 1259 /* 1260 * Check if we are coming from userspace. If yes, then run the normal 1261 * exception handler which will deliver the MC event to this kernel. 1262 */ 1263 andi. r11,r12,MSR_PR /* See if coming from user. */ 1264 bne mce_deliver /* continue in V mode if we are. */ 1265 1266 /* 1267 * At this point we are coming from kernel context. 1268 * Queue up the MCE event and return from the interrupt. 1269 * But before that, check if this is an un-recoverable exception. 1270 * If yes, then stay on emergency stack and panic. 1271 */ 1272 andi. r11,r12,MSR_RI 1273 beq unrecoverable_mce 1274 1275 /* 1276 * Check if we have successfully handled/recovered from error, if not 1277 * then stay on emergency stack and panic. 
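 * (RESULT(r1) holds the return value of machine_check_early saved above;
 * zero means the error was not handled.)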
1278 */ 1279 ld r3,RESULT(r1) /* Load result */ 1280 cmpdi r3,0 /* see if we handled MCE successfully */ 1281 beq unrecoverable_mce /* if !handled then panic */ 1282 1283 /* 1284 * Return from MC interrupt. 1285 * Queue up the MCE event so that we can log it later, while 1286 * returning from kernel or opal call. 1287 */ 1288 bl machine_check_queue_event 1289 MACHINE_CHECK_HANDLER_WINDUP 1290 RFI_TO_KERNEL 1291 1292mce_deliver: 1293 /* 1294 * This is a host user or guest MCE. Restore all registers, then 1295 * run the "late" handler. For host user, this will run the 1296 * machine_check_exception handler in virtual mode like a normal 1297 * interrupt handler. For guest, this will trigger the KVM test 1298 * and branch to the KVM interrupt similarly to other interrupts. 1299 */ 1300BEGIN_FTR_SECTION 1301 ld r10,ORIG_GPR3(r1) 1302 mtspr SPRN_CFAR,r10 1303END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1304 MACHINE_CHECK_HANDLER_WINDUP 1305 GEN_INT_ENTRY machine_check, virt=0 1306 1307EXC_COMMON_BEGIN(machine_check_common) 1308 /* 1309 * Machine check is different because we use a different 1310 * save area: PACA_EXMC instead of PACA_EXGEN. 1311 */ 1312 GEN_COMMON machine_check 1313 addi r3,r1,STACK_INT_FRAME_REGS 1314 bl machine_check_exception_async 1315 b interrupt_return_srr 1316 1317 1318#ifdef CONFIG_PPC_P7_NAP 1319/* 1320 * This is an idle wakeup. Low level machine check has already been 1321 * done. Queue the event then call the idle code to do the wake up. 1322 */ 1323EXC_COMMON_BEGIN(machine_check_idle_common) 1324 bl machine_check_queue_event 1325 1326 /* 1327 * GPR-loss wakeups are relatively straightforward, because the 1328 * idle sleep code has saved all non-volatile registers on its 1329 * own stack, and r1 in PACAR1. 1330 * 1331 * For no-loss wakeups the r1 and lr registers used by the 1332 * early machine check handler have to be restored first. r2 is 1333 * the kernel TOC, so no need to restore it. 1334 * 1335 * Then decrement MCE nesting after finishing with the stack. 1336 */ 1337 ld r3,_MSR(r1) 1338 ld r4,_LINK(r1) 1339 ld r1,GPR1(r1) 1340 1341 lhz r11,PACA_IN_MCE(r13) 1342 subi r11,r11,1 1343 sth r11,PACA_IN_MCE(r13) 1344 1345 mtlr r4 1346 rlwinm r10,r3,47-31,30,31 1347 cmpwi cr1,r10,2 1348 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 1349 b idle_return_gpr_loss 1350#endif 1351 1352EXC_COMMON_BEGIN(unrecoverable_mce) 1353 /* 1354 * We are going down. But there are chances that we might get hit by 1355 * another MCE during panic path and we may run into unstable state 1356 * with no way out. Hence, turn ME bit off while going down, so that 1357 * when another MCE is hit during panic path, system will checkstop 1358 * and hypervisor will get restarted cleanly by SP. 1359 */ 1360BEGIN_FTR_SECTION 1361 li r10,0 /* clear MSR_RI */ 1362 mtmsrd r10,1 1363 bl disable_machine_check 1364END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1365 ld r10,PACAKMSR(r13) 1366 li r3,MSR_ME 1367 andc r10,r10,r3 1368 mtmsrd r10 1369 1370 lhz r12,PACA_IN_MCE(r13) 1371 subi r12,r12,1 1372 sth r12,PACA_IN_MCE(r13) 1373 1374 /* 1375 * Invoke machine_check_exception to print MCE event and panic. 1376 * This is the NMI version of the handler because we are called from 1377 * the early handler which is a true NMI. 1378 */ 1379 addi r3,r1,STACK_INT_FRAME_REGS 1380 bl machine_check_exception 1381 1382 /* 1383 * We will not reach here. Even if we did, there is no way out. 1384 * Call unrecoverable_exception and die. 1385 */ 1386 addi r3,r1,STACK_INT_FRAME_REGS 1387 bl unrecoverable_exception 1388 b . 
1389 1390 1391/** 1392 * Interrupt 0x300 - Data Storage Interrupt (DSI). 1393 * This is a synchronous interrupt generated due to a data access exception, 1394 * e.g., a load or store which does not have a valid page table entry with 1395 * permissions. DAWR matches also fault here, as do RC updates, and minor misc 1396 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc. 1397 * 1398 * Handling: 1399 * - Hash MMU 1400 * Go to do_hash_fault, which attempts to fill the HPT from an entry in the 1401 * Linux page table. Hash faults can hit in kernel mode in a fairly 1402 * arbitrary state (e.g., interrupts disabled, locks held) when accessing 1403 * "non-bolted" regions, e.g., vmalloc space. However these should always be 1404 * backed by Linux page table entries. 1405 * 1406 * If no entry is found the Linux page fault handler is invoked (by 1407 * do_hash_fault). Linux page faults can happen in kernel mode due to user 1408 * copy operations of course. 1409 * 1410 * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest 1411 * MMU context, which may cause a DSI in the host, which must go to the 1412 * KVM handler. MSR[IR] is not enabled, so the real-mode handler will 1413 * always be used regardless of AIL setting. 1414 * 1415 * - Radix MMU 1416 * The hardware loads from the Linux page table directly, so a fault goes 1417 * immediately to Linux page fault. 1418 * 1419 * Conditions like DAWR match are handled on the way in to Linux page fault. 1420 */ 1421INT_DEFINE_BEGIN(data_access) 1422 IVEC=0x300 1423 IDAR=1 1424 IDSISR=1 1425 IKVM_REAL=1 1426INT_DEFINE_END(data_access) 1427 1428EXC_REAL_BEGIN(data_access, 0x300, 0x80) 1429 GEN_INT_ENTRY data_access, virt=0 1430EXC_REAL_END(data_access, 0x300, 0x80) 1431EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) 1432 GEN_INT_ENTRY data_access, virt=1 1433EXC_VIRT_END(data_access, 0x4300, 0x80) 1434EXC_COMMON_BEGIN(data_access_common) 1435 GEN_COMMON data_access 1436 ld r4,_DSISR(r1) 1437 addi r3,r1,STACK_INT_FRAME_REGS 1438 andis. r0,r4,DSISR_DABRMATCH@h 1439 bne- 1f 1440#ifdef CONFIG_PPC_64S_HASH_MMU 1441BEGIN_MMU_FTR_SECTION 1442 bl do_hash_fault 1443MMU_FTR_SECTION_ELSE 1444 bl do_page_fault 1445ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1446#else 1447 bl do_page_fault 1448#endif 1449 b interrupt_return_srr 1450 14511: bl do_break 1452 /* 1453 * do_break() may have changed the NV GPRS while handling a breakpoint. 1454 * If so, we need to restore them with their updated values. 1455 */ 1456 HANDLER_RESTORE_NVGPRS() 1457 b interrupt_return_srr 1458 1459 1460/** 1461 * Interrupt 0x380 - Data Segment Interrupt (DSLB). 1462 * This is a synchronous interrupt in response to an MMU fault due to a missing SLB 1463 * entry for HPT, or an address outside the RPT translation range. 1464 * 1465 * Handling: 1466 * - HPT: 1467 * This refills the SLB, or reports an access fault similarly to a bad page 1468 * fault. When coming from user-mode, the SLB handler may access any kernel 1469 * data, though it may itself take a DSLB. When coming from kernel mode, 1470 * recursive faults must be avoided so access is restricted to the kernel 1471 * image text/data, kernel stack, and any data allocated below 1472 * ppc64_bolted_size (first segment). The kernel handler must avoid stomping 1473 * on user-handler data structures. 1474 * 1475 * KVM: Same as 0x300, DSLB must test for KVM guest.
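 *
 * On HPT the common handler below calls do_slb_fault and, on success,
 * returns via fast_interrupt_return_srr; on failure (or on radix, where
 * the access is outside the page table range and r3 is set to -EFAULT) it
 * reports the fault via do_bad_segment_interrupt.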
1476 */ 1477INT_DEFINE_BEGIN(data_access_slb) 1478 IVEC=0x380 1479 IDAR=1 1480 IKVM_REAL=1 1481INT_DEFINE_END(data_access_slb) 1482 1483EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) 1484 GEN_INT_ENTRY data_access_slb, virt=0 1485EXC_REAL_END(data_access_slb, 0x380, 0x80) 1486EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) 1487 GEN_INT_ENTRY data_access_slb, virt=1 1488EXC_VIRT_END(data_access_slb, 0x4380, 0x80) 1489EXC_COMMON_BEGIN(data_access_slb_common) 1490 GEN_COMMON data_access_slb 1491#ifdef CONFIG_PPC_64S_HASH_MMU 1492BEGIN_MMU_FTR_SECTION 1493 /* HPT case, do SLB fault */ 1494 addi r3,r1,STACK_INT_FRAME_REGS 1495 bl do_slb_fault 1496 cmpdi r3,0 1497 bne- 1f 1498 b fast_interrupt_return_srr 14991: /* Error case */ 1500MMU_FTR_SECTION_ELSE 1501 /* Radix case, access is outside page table range */ 1502 li r3,-EFAULT 1503ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1504#else 1505 li r3,-EFAULT 1506#endif 1507 std r3,RESULT(r1) 1508 addi r3,r1,STACK_INT_FRAME_REGS 1509 bl do_bad_segment_interrupt 1510 b interrupt_return_srr 1511 1512 1513/** 1514 * Interrupt 0x400 - Instruction Storage Interrupt (ISI). 1515 * This is a synchronous interrupt in response to an MMU fault due to an 1516 * instruction fetch. 1517 * 1518 * Handling: 1519 * Similar to DSI, though in response to fetch. The faulting address is found 1520 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR). 1521 */ 1522INT_DEFINE_BEGIN(instruction_access) 1523 IVEC=0x400 1524 IISIDE=1 1525 IDAR=1 1526 IDSISR=1 1527#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1528 IKVM_REAL=1 1529#endif 1530INT_DEFINE_END(instruction_access) 1531 1532EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) 1533 GEN_INT_ENTRY instruction_access, virt=0 1534EXC_REAL_END(instruction_access, 0x400, 0x80) 1535EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) 1536 GEN_INT_ENTRY instruction_access, virt=1 1537EXC_VIRT_END(instruction_access, 0x4400, 0x80) 1538EXC_COMMON_BEGIN(instruction_access_common) 1539 GEN_COMMON instruction_access 1540 addi r3,r1,STACK_INT_FRAME_REGS 1541#ifdef CONFIG_PPC_64S_HASH_MMU 1542BEGIN_MMU_FTR_SECTION 1543 bl do_hash_fault 1544MMU_FTR_SECTION_ELSE 1545 bl do_page_fault 1546ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1547#else 1548 bl do_page_fault 1549#endif 1550 b interrupt_return_srr 1551 1552 1553/** 1554 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB). 1555 * This is a synchronous interrupt in response to an MMU fault due to an 1556 * instruction fetch. 1557 * 1558 * Handling: 1559 * Similar to DSLB, though in response to fetch. The faulting address is found 1560 * in SRR0 (rather than DAR). 
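 *
 * IISIDE=1 below makes the entry code copy SRR0 (saved as NIP) into the
 * frame's DAR slot, see the IDAR/IISIDE handling in __GEN_COMMON_BODY, so
 * the common handler finds the faulting address in the usual place.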
1561 */ 1562INT_DEFINE_BEGIN(instruction_access_slb) 1563 IVEC=0x480 1564 IISIDE=1 1565 IDAR=1 1566#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1567 IKVM_REAL=1 1568#endif 1569INT_DEFINE_END(instruction_access_slb) 1570 1571EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) 1572 GEN_INT_ENTRY instruction_access_slb, virt=0 1573EXC_REAL_END(instruction_access_slb, 0x480, 0x80) 1574EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) 1575 GEN_INT_ENTRY instruction_access_slb, virt=1 1576EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) 1577EXC_COMMON_BEGIN(instruction_access_slb_common) 1578 GEN_COMMON instruction_access_slb 1579#ifdef CONFIG_PPC_64S_HASH_MMU 1580BEGIN_MMU_FTR_SECTION 1581 /* HPT case, do SLB fault */ 1582 addi r3,r1,STACK_INT_FRAME_REGS 1583 bl do_slb_fault 1584 cmpdi r3,0 1585 bne- 1f 1586 b fast_interrupt_return_srr 15871: /* Error case */ 1588MMU_FTR_SECTION_ELSE 1589 /* Radix case, access is outside page table range */ 1590 li r3,-EFAULT 1591ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1592#else 1593 li r3,-EFAULT 1594#endif 1595 std r3,RESULT(r1) 1596 addi r3,r1,STACK_INT_FRAME_REGS 1597 bl do_bad_segment_interrupt 1598 b interrupt_return_srr 1599 1600 1601/** 1602 * Interrupt 0x500 - External Interrupt. 1603 * This is an asynchronous maskable interrupt in response to an "external 1604 * exception" from the interrupt controller or hypervisor (e.g., device 1605 * interrupt). It is maskable in hardware by clearing MSR[EE], and 1606 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()). 1607 * 1608 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that 1609 * interrupts are delivered with the HSRR registers while guests use SRRs, which 1610 * requires IHSRR_IF_HVMODE. 1611 * 1612 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that 1613 * external interrupts are delivered as Hypervisor Virtualization Interrupts 1614 * rather than External Interrupts. 1615 * 1616 * Handling: 1617 * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce overhead, 1618 * because registers at the time of the interrupt are not so important as it is 1619 * asynchronous. 1620 * 1621 * If soft masked, the masked handler will note the pending interrupt for 1622 * replay, and clear MSR[EE] in the interrupted context. 1623 * 1624 * CFAR is not required because this is an asynchronous interrupt that in 1625 * general won't have much bearing on the state of the CPU, with the possible 1626 * exception of crash/debug IPIs, but those are generally moving to use SRESET 1627 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case 1628 * it may be exiting the guest and need CFAR to be saved.
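 *
 * Hence ICFAR=0 below, with ICFAR_IF_HVMODE=1 when CONFIG_KVM_BOOK3S_HV_POSSIBLE
 * is set, so CFAR is still saved for the HSRR (HV) flavour that may be a
 * guest exit.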
1629 */ 1630INT_DEFINE_BEGIN(hardware_interrupt) 1631 IVEC=0x500 1632 IHSRR_IF_HVMODE=1 1633 IMASK=IRQS_DISABLED 1634 IKVM_REAL=1 1635 IKVM_VIRT=1 1636 ICFAR=0 1637#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1638 ICFAR_IF_HVMODE=1 1639#endif 1640INT_DEFINE_END(hardware_interrupt) 1641 1642EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) 1643 GEN_INT_ENTRY hardware_interrupt, virt=0 1644EXC_REAL_END(hardware_interrupt, 0x500, 0x100) 1645EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) 1646 GEN_INT_ENTRY hardware_interrupt, virt=1 1647EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) 1648EXC_COMMON_BEGIN(hardware_interrupt_common) 1649 GEN_COMMON hardware_interrupt 1650 addi r3,r1,STACK_INT_FRAME_REGS 1651 bl do_IRQ 1652 BEGIN_FTR_SECTION 1653 b interrupt_return_hsrr 1654 FTR_SECTION_ELSE 1655 b interrupt_return_srr 1656 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1657 1658 1659/** 1660 * Interrupt 0x600 - Alignment Interrupt 1661 * This is a synchronous interrupt in response to data alignment fault. 1662 */ 1663INT_DEFINE_BEGIN(alignment) 1664 IVEC=0x600 1665 IDAR=1 1666 IDSISR=1 1667#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1668 IKVM_REAL=1 1669#endif 1670INT_DEFINE_END(alignment) 1671 1672EXC_REAL_BEGIN(alignment, 0x600, 0x100) 1673 GEN_INT_ENTRY alignment, virt=0 1674EXC_REAL_END(alignment, 0x600, 0x100) 1675EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) 1676 GEN_INT_ENTRY alignment, virt=1 1677EXC_VIRT_END(alignment, 0x4600, 0x100) 1678EXC_COMMON_BEGIN(alignment_common) 1679 GEN_COMMON alignment 1680 addi r3,r1,STACK_INT_FRAME_REGS 1681 bl alignment_exception 1682 HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ 1683 b interrupt_return_srr 1684 1685 1686/** 1687 * Interrupt 0x700 - Program Interrupt (program check). 1688 * This is a synchronous interrupt in response to various instruction faults: 1689 * traps, privilege errors, TM errors, floating point exceptions. 1690 * 1691 * Handling: 1692 * This interrupt may use the "emergency stack" in some cases when being taken 1693 * from kernel context, which complicates handling. 1694 */ 1695INT_DEFINE_BEGIN(program_check) 1696 IVEC=0x700 1697#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1698 IKVM_REAL=1 1699#endif 1700INT_DEFINE_END(program_check) 1701 1702EXC_REAL_BEGIN(program_check, 0x700, 0x100) 1703 EARLY_BOOT_FIXUP 1704 GEN_INT_ENTRY program_check, virt=0 1705EXC_REAL_END(program_check, 0x700, 0x100) 1706EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) 1707 GEN_INT_ENTRY program_check, virt=1 1708EXC_VIRT_END(program_check, 0x4700, 0x100) 1709EXC_COMMON_BEGIN(program_check_common) 1710 __GEN_COMMON_ENTRY program_check 1711 1712 /* 1713 * It's possible to receive a TM Bad Thing type program check with 1714 * userspace register values (in particular r1), but with SRR1 reporting 1715 * that we came from the kernel. Normally that would confuse the bad 1716 * stack logic, and we would report a bad kernel stack pointer. Instead 1717 * we switch to the emergency stack if we're taking a TM Bad Thing from 1718 * the kernel. 1719 */ 1720 1721 andi. r10,r12,MSR_PR 1722 bne .Lnormal_stack /* If userspace, go normal path */ 1723 1724 andis. r10,r12,(SRR1_PROGTM)@h 1725 bne .Lemergency_stack /* If TM, emergency */ 1726 1727 cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */ 1728 blt .Lnormal_stack /* normal path if not */ 1729 1730 /* Use the emergency stack */ 1731.Lemergency_stack: 1732 andi. 
r10,r12,MSR_PR /* Set CR0 correctly for label */ 1733 /* 3 in EXCEPTION_PROLOG_COMMON */ 1734 mr r10,r1 /* Save r1 */ 1735 ld r1,PACAEMERGSP(r13) /* Use emergency stack */ 1736 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1737 __ISTACK(program_check)=0 1738 __GEN_COMMON_BODY program_check 1739 b .Ldo_program_check 1740 1741.Lnormal_stack: 1742 __ISTACK(program_check)=1 1743 __GEN_COMMON_BODY program_check 1744 1745.Ldo_program_check: 1746 addi r3,r1,STACK_INT_FRAME_REGS 1747 bl program_check_exception 1748 HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ 1749 b interrupt_return_srr 1750 1751 1752/* 1753 * Interrupt 0x800 - Floating-Point Unavailable Interrupt. 1754 * This is a synchronous interrupt in response to executing an fp instruction 1755 * with MSR[FP]=0. 1756 * 1757 * Handling: 1758 * This will load FP registers and enable the FP bit if coming from userspace, 1759 * otherwise report a bad kernel use of FP. 1760 */ 1761INT_DEFINE_BEGIN(fp_unavailable) 1762 IVEC=0x800 1763#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1764 IKVM_REAL=1 1765#endif 1766 IMSR_R12=1 1767INT_DEFINE_END(fp_unavailable) 1768 1769EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1770 GEN_INT_ENTRY fp_unavailable, virt=0 1771EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1772EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1773 GEN_INT_ENTRY fp_unavailable, virt=1 1774EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1775EXC_COMMON_BEGIN(fp_unavailable_common) 1776 GEN_COMMON fp_unavailable 1777 bne 1f /* if from user, just load it up */ 1778 addi r3,r1,STACK_INT_FRAME_REGS 1779 bl kernel_fp_unavailable_exception 17800: trap 1781 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17821: 1783#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1784BEGIN_FTR_SECTION 1785 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1786 * transaction), go do TM stuff 1787 */ 1788 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1789 bne- 2f 1790END_FTR_SECTION_IFSET(CPU_FTR_TM) 1791#endif 1792 bl load_up_fpu 1793 b fast_interrupt_return_srr 1794#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17952: /* User process was in a transaction */ 1796 addi r3,r1,STACK_INT_FRAME_REGS 1797 bl fp_unavailable_tm 1798 b interrupt_return_srr 1799#endif 1800 1801 1802/** 1803 * Interrupt 0x900 - Decrementer Interrupt. 1804 * This is an asynchronous interrupt in response to a decrementer exception 1805 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1806 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1807 * local_irq_disable()). 1808 * 1809 * Handling: 1810 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1811 * 1812 * If soft masked, the masked handler will note the pending interrupt for 1813 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1814 * in the interrupted context. 1815 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1816 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1817 * on the emergency stack. 1818 * 1819 * CFAR is not required because this is asynchronous (see hardware_interrupt). 1820 * A watchdog interrupt may like to have CFAR, but usually the interesting 1821 * branch is long gone by that point (e.g., infinite loop). 
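 *
 * A rough sketch of the soft-masked case described above (illustrative C
 * only; the real code is the MASKED_INTERRUPT macro near the end of this
 * file, and 0x7fffffff is the value it actually loads into the DEC):
 *
 *	local_paca->irq_happened |= PACA_IRQ_DEC;	// note for replay
 *	mtspr(SPRN_DEC, 0x7fffffff);		// push the next tick far out
 *	// MSR[EE] is left set; the DEC will not expire again for a long time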
1822 */ 1823INT_DEFINE_BEGIN(decrementer) 1824 IVEC=0x900 1825 IMASK=IRQS_DISABLED 1826#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1827 IKVM_REAL=1 1828#endif 1829 ICFAR=0 1830INT_DEFINE_END(decrementer) 1831 1832EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1833 GEN_INT_ENTRY decrementer, virt=0 1834EXC_REAL_END(decrementer, 0x900, 0x80) 1835EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1836 GEN_INT_ENTRY decrementer, virt=1 1837EXC_VIRT_END(decrementer, 0x4900, 0x80) 1838EXC_COMMON_BEGIN(decrementer_common) 1839 GEN_COMMON decrementer 1840 addi r3,r1,STACK_INT_FRAME_REGS 1841 bl timer_interrupt 1842 b interrupt_return_srr 1843 1844 1845/** 1846 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1847 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1848 * register. 1849 * 1850 * Handling: 1851 * Linux does not use this outside KVM where it's used to keep a host timer 1852 * while the guest is given control of DEC. It should normally be caught by 1853 * the KVM test and routed there. 1854 */ 1855INT_DEFINE_BEGIN(hdecrementer) 1856 IVEC=0x980 1857 IHSRR=1 1858 ISTACK=0 1859 IKVM_REAL=1 1860 IKVM_VIRT=1 1861INT_DEFINE_END(hdecrementer) 1862 1863EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1864 GEN_INT_ENTRY hdecrementer, virt=0 1865EXC_REAL_END(hdecrementer, 0x980, 0x80) 1866EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1867 GEN_INT_ENTRY hdecrementer, virt=1 1868EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1869EXC_COMMON_BEGIN(hdecrementer_common) 1870 __GEN_COMMON_ENTRY hdecrementer 1871 /* 1872 * Hypervisor decrementer interrupts not caught by the KVM test 1873 * shouldn't occur but are sometimes left pending on exit from a KVM 1874 * guest. We don't need to do anything to clear them, as they are 1875 * edge-triggered. 1876 * 1877 * Be careful to avoid touching the kernel stack. 1878 */ 1879 li r10,0 1880 stb r10,PACAHSRR_VALID(r13) 1881 ld r10,PACA_EXGEN+EX_CTR(r13) 1882 mtctr r10 1883 mtcrf 0x80,r9 1884 ld r9,PACA_EXGEN+EX_R9(r13) 1885 ld r10,PACA_EXGEN+EX_R10(r13) 1886 ld r11,PACA_EXGEN+EX_R11(r13) 1887 ld r12,PACA_EXGEN+EX_R12(r13) 1888 ld r13,PACA_EXGEN+EX_R13(r13) 1889 HRFI_TO_KERNEL 1890 1891 1892/** 1893 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1894 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1895 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1896 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1897 * 1898 * Handling: 1899 * Guests may use this for IPIs between threads in a core if the 1900 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1901 * 1902 * If soft masked, the masked handler will note the pending interrupt for 1903 * replay, leaving MSR[EE] enabled in the interrupted context because the 1904 * doorbells are edge triggered. 1905 * 1906 * CFAR is not required, similarly to hardware_interrupt. 
1907 */ 1908INT_DEFINE_BEGIN(doorbell_super) 1909 IVEC=0xa00 1910 IMASK=IRQS_DISABLED 1911#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1912 IKVM_REAL=1 1913#endif 1914 ICFAR=0 1915INT_DEFINE_END(doorbell_super) 1916 1917EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1918 GEN_INT_ENTRY doorbell_super, virt=0 1919EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1920EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1921 GEN_INT_ENTRY doorbell_super, virt=1 1922EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1923EXC_COMMON_BEGIN(doorbell_super_common) 1924 GEN_COMMON doorbell_super 1925 addi r3,r1,STACK_INT_FRAME_REGS 1926#ifdef CONFIG_PPC_DOORBELL 1927 bl doorbell_exception 1928#else 1929 bl unknown_async_exception 1930#endif 1931 b interrupt_return_srr 1932 1933 1934EXC_REAL_NONE(0xb00, 0x100) 1935EXC_VIRT_NONE(0x4b00, 0x100) 1936 1937/** 1938 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1939 * This is a synchronous interrupt invoked with the "sc" instruction. The 1940 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1941 * is directed to the currently running OS. The hypercall is invoked with 1942 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1943 * 1944 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1945 * 0x4c00 virtual mode. 1946 * 1947 * Handling: 1948 * If the KVM test fires then it was due to a hypercall and is accordingly 1949 * routed to KVM. Otherwise this executes a normal Linux system call. 1950 * 1951 * Call convention: 1952 * 1953 * syscall and hypercalls register conventions are documented in 1954 * Documentation/powerpc/syscall64-abi.rst and 1955 * Documentation/powerpc/papr_hcalls.rst respectively. 1956 * 1957 * The intersection of volatile registers that don't contain possible 1958 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry 1959 * without saving, though xer is not a good idea to use, as hardware may 1960 * interpret some bits so it may be costly to change them. 1961 */ 1962INT_DEFINE_BEGIN(system_call) 1963 IVEC=0xc00 1964 IKVM_REAL=1 1965 IKVM_VIRT=1 1966 ICFAR=0 1967INT_DEFINE_END(system_call) 1968 1969.macro SYSTEM_CALL virt 1970#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1971 /* 1972 * There is a little bit of juggling to get syscall and hcall 1973 * working well. Save r13 in ctr to avoid using SPRG scratch 1974 * register. 1975 * 1976 * Userspace syscalls have already saved the PPR, hcalls must save 1977 * it before setting HMT_MEDIUM. 1978 */ 1979 mtctr r13 1980 GET_PACA(r13) 1981 std r10,PACA_EXGEN+EX_R10(r13) 1982 INTERRUPT_TO_KERNEL 1983 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */ 1984 mfctr r9 1985#else 1986 mr r9,r13 1987 GET_PACA(r13) 1988 INTERRUPT_TO_KERNEL 1989#endif 1990 1991#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1992BEGIN_FTR_SECTION 1993 cmpdi r0,0x1ebe 1994 beq- 1f 1995END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 1996#endif 1997 1998 /* We reach here with PACA in r13, r13 in r9. */ 1999 mfspr r11,SPRN_SRR0 2000 mfspr r12,SPRN_SRR1 2001 2002 HMT_MEDIUM 2003 2004 .if ! \virt 2005 __LOAD_HANDLER(r10, system_call_common_real, real_vectors) 2006 mtctr r10 2007 bctr 2008 .else 2009#ifdef CONFIG_RELOCATABLE 2010 __LOAD_HANDLER(r10, system_call_common, virt_vectors) 2011 mtctr r10 2012 bctr 2013#else 2014 b system_call_common 2015#endif 2016 .endif 2017 2018#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 2019 /* Fast LE/BE switch system call */ 20201: mfspr r12,SPRN_SRR1 2021 xori r12,r12,MSR_LE 2022 mtspr SPRN_SRR1,r12 2023 mr r13,r9 2024 RFI_TO_USER /* return to userspace */ 2025 b . 
/* prevent speculative execution */
2026#endif
2027.endm
2028
2029EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
2030 SYSTEM_CALL 0
2031EXC_REAL_END(system_call, 0xc00, 0x100)
2032EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
2033 SYSTEM_CALL 1
2034EXC_VIRT_END(system_call, 0x4c00, 0x100)
2035
2036#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2037TRAMP_REAL_BEGIN(kvm_hcall)
2038 std r9,PACA_EXGEN+EX_R9(r13)
2039 std r11,PACA_EXGEN+EX_R11(r13)
2040 std r12,PACA_EXGEN+EX_R12(r13)
2041 mfcr r9
2042 mfctr r10
2043 std r10,PACA_EXGEN+EX_R13(r13)
2044 li r10,0
2045 std r10,PACA_EXGEN+EX_CFAR(r13)
2046 std r10,PACA_EXGEN+EX_CTR(r13)
2047 /*
2048 * Save the PPR (on systems that support it) before changing to
2049 * HMT_MEDIUM. That allows the KVM code to save that value into the
2050 * guest state (it is the guest's PPR value).
2051 */
2052BEGIN_FTR_SECTION
2053 mfspr r10,SPRN_PPR
2054 std r10,PACA_EXGEN+EX_PPR(r13)
2055END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2056
2057 HMT_MEDIUM
2058
2059#ifdef CONFIG_RELOCATABLE
2060 /*
2061 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
2062 * outside the head section.
2063 */
2064 __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
2065 mtctr r10
2066 bctr
2067#else
2068 b kvmppc_hcall
2069#endif
2070#endif
2071
2072/**
2073 * Interrupt 0xd00 - Trace Interrupt.
2074 * This is a synchronous interrupt in response to instruction step or
2075 * breakpoint faults.
2076 */
2077INT_DEFINE_BEGIN(single_step)
2078 IVEC=0xd00
2079#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2080 IKVM_REAL=1
2081#endif
2082INT_DEFINE_END(single_step)
2083
2084EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
2085 GEN_INT_ENTRY single_step, virt=0
2086EXC_REAL_END(single_step, 0xd00, 0x100)
2087EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
2088 GEN_INT_ENTRY single_step, virt=1
2089EXC_VIRT_END(single_step, 0x4d00, 0x100)
2090EXC_COMMON_BEGIN(single_step_common)
2091 GEN_COMMON single_step
2092 addi r3,r1,STACK_INT_FRAME_REGS
2093 bl single_step_exception
2094 b interrupt_return_srr
2095
2096
2097/**
2098 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
2099 * This is a synchronous interrupt in response to an MMU fault caused by a
2100 * guest data access.
2101 *
2102 * Handling:
2103 * This should always get routed to KVM. In radix MMU mode, this is caused
2104 * by a guest nested radix access that can't be performed due to the
2105 * partition scope page table. In hash mode, this can be caused by guests
2106 * running with translation disabled (virtual real mode) or with VPM enabled.
2107 * KVM will update the page table structures or disallow the access.
2108 */
2109INT_DEFINE_BEGIN(h_data_storage)
2110 IVEC=0xe00
2111 IHSRR=1
2112 IDAR=1
2113 IDSISR=1
2114 IKVM_REAL=1
2115 IKVM_VIRT=1
2116INT_DEFINE_END(h_data_storage)
2117
2118EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
2119 GEN_INT_ENTRY h_data_storage, virt=0, ool=1
2120EXC_REAL_END(h_data_storage, 0xe00, 0x20)
2121EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
2122 GEN_INT_ENTRY h_data_storage, virt=1, ool=1
2123EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
2124EXC_COMMON_BEGIN(h_data_storage_common)
2125 GEN_COMMON h_data_storage
2126 addi r3,r1,STACK_INT_FRAME_REGS
2127BEGIN_MMU_FTR_SECTION
2128 bl do_bad_page_fault_segv
2129MMU_FTR_SECTION_ELSE
2130 bl unknown_exception
2131ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
2132 b interrupt_return_hsrr
2133
2134
2135/**
2136 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
2137 * This is a synchronous interrupt in response to an MMU fault caused by a
2138 * guest instruction fetch, similar to HDSI.
2139 */
2140INT_DEFINE_BEGIN(h_instr_storage)
2141 IVEC=0xe20
2142 IHSRR=1
2143 IKVM_REAL=1
2144 IKVM_VIRT=1
2145INT_DEFINE_END(h_instr_storage)
2146
2147EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
2148 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
2149EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
2150EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
2151 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
2152EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
2153EXC_COMMON_BEGIN(h_instr_storage_common)
2154 GEN_COMMON h_instr_storage
2155 addi r3,r1,STACK_INT_FRAME_REGS
2156 bl unknown_exception
2157 b interrupt_return_hsrr
2158
2159
2160/**
2161 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
2162 */
2163INT_DEFINE_BEGIN(emulation_assist)
2164 IVEC=0xe40
2165 IHSRR=1
2166 IKVM_REAL=1
2167 IKVM_VIRT=1
2168INT_DEFINE_END(emulation_assist)
2169
2170EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2171 GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2172EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2173EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2174 GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2175EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2176EXC_COMMON_BEGIN(emulation_assist_common)
2177 GEN_COMMON emulation_assist
2178 addi r3,r1,STACK_INT_FRAME_REGS
2179 bl emulation_assist_interrupt
2180 HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
2181 b interrupt_return_hsrr
2182
2183
2184/**
2185 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2186 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2187 * Exception. It is always taken in real mode but uses HSRR registers
2188 * unlike SRESET and MCE.
2189 *
2190 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2191 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2192 *
2193 * Handling:
2194 * This is a special case: it is handled similarly to machine checks, with an
2195 * initial real mode handler that is not soft-masked and attempts to fix the
2196 * problem, followed by a regular handler which is soft-maskable and reports
2197 * the problem.
2198 *
2199 * The emergency stack is used for the early real mode handler.
2200 *
2201 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2202 * either use soft-masking for the MCE, or use irq_work for the HMI.
2203 *
2204 * KVM:
2205 * Unlike MCE, this calls into KVM without calling the real mode handler
2206 * first.
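 *
 * The two-phase flow described above, as an illustrative C-like sketch of
 * the assembly that follows (the two function names are the actual bl
 * targets below):
 *
 *	ret = hmi_exception_realmode(regs);	// early handler: real mode, emergency stack
 *	if (ret == 0)
 *		return;				// recovered; restore regs and hrfid back
 *	// otherwise restore registers and re-enter as the ordinary soft-maskable
 *	// handler, which ends up calling handle_hmi_exception(regs)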
2207 */ 2208INT_DEFINE_BEGIN(hmi_exception_early) 2209 IVEC=0xe60 2210 IHSRR=1 2211 IREALMODE_COMMON=1 2212 ISTACK=0 2213 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 2214 IKVM_REAL=1 2215INT_DEFINE_END(hmi_exception_early) 2216 2217INT_DEFINE_BEGIN(hmi_exception) 2218 IVEC=0xe60 2219 IHSRR=1 2220 IMASK=IRQS_DISABLED 2221 IKVM_REAL=1 2222INT_DEFINE_END(hmi_exception) 2223 2224EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) 2225 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1 2226EXC_REAL_END(hmi_exception, 0xe60, 0x20) 2227EXC_VIRT_NONE(0x4e60, 0x20) 2228 2229EXC_COMMON_BEGIN(hmi_exception_early_common) 2230 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early 2231 2232 mr r10,r1 /* Save r1 */ 2233 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ 2234 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 2235 2236 __GEN_COMMON_BODY hmi_exception_early 2237 2238 addi r3,r1,STACK_INT_FRAME_REGS 2239 bl hmi_exception_realmode 2240 cmpdi cr0,r3,0 2241 bne 1f 2242 2243 EXCEPTION_RESTORE_REGS hsrr=1 2244 HRFI_TO_USER_OR_KERNEL 2245 22461: 2247 /* 2248 * Go to virtual mode and pull the HMI event information from 2249 * firmware. 2250 */ 2251 EXCEPTION_RESTORE_REGS hsrr=1 2252 GEN_INT_ENTRY hmi_exception, virt=0 2253 2254EXC_COMMON_BEGIN(hmi_exception_common) 2255 GEN_COMMON hmi_exception 2256 addi r3,r1,STACK_INT_FRAME_REGS 2257 bl handle_hmi_exception 2258 b interrupt_return_hsrr 2259 2260 2261/** 2262 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt. 2263 * This is an asynchronous interrupt in response to a msgsnd doorbell. 2264 * Similar to the 0xa00 doorbell but for host rather than guest. 2265 * 2266 * CFAR is not required (similar to doorbell_interrupt), unless KVM HV 2267 * is enabled, in which case it may be a guest exit. Most PowerNV kernels 2268 * include KVM support so it would be nice if this could be dynamically 2269 * patched out if KVM was not currently running any guests. 2270 */ 2271INT_DEFINE_BEGIN(h_doorbell) 2272 IVEC=0xe80 2273 IHSRR=1 2274 IMASK=IRQS_DISABLED 2275 IKVM_REAL=1 2276 IKVM_VIRT=1 2277#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2278 ICFAR=0 2279#endif 2280INT_DEFINE_END(h_doorbell) 2281 2282EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) 2283 GEN_INT_ENTRY h_doorbell, virt=0, ool=1 2284EXC_REAL_END(h_doorbell, 0xe80, 0x20) 2285EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) 2286 GEN_INT_ENTRY h_doorbell, virt=1, ool=1 2287EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) 2288EXC_COMMON_BEGIN(h_doorbell_common) 2289 GEN_COMMON h_doorbell 2290 addi r3,r1,STACK_INT_FRAME_REGS 2291#ifdef CONFIG_PPC_DOORBELL 2292 bl doorbell_exception 2293#else 2294 bl unknown_async_exception 2295#endif 2296 b interrupt_return_hsrr 2297 2298 2299/** 2300 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt. 2301 * This is an asynchronous interrupt in response to an "external exception". 2302 * Similar to 0x500 but for host only. 2303 * 2304 * Like h_doorbell, CFAR is only required for KVM HV because this can be 2305 * a guest exit. 
2306 */ 2307INT_DEFINE_BEGIN(h_virt_irq) 2308 IVEC=0xea0 2309 IHSRR=1 2310 IMASK=IRQS_DISABLED 2311 IKVM_REAL=1 2312 IKVM_VIRT=1 2313#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2314 ICFAR=0 2315#endif 2316INT_DEFINE_END(h_virt_irq) 2317 2318EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20) 2319 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1 2320EXC_REAL_END(h_virt_irq, 0xea0, 0x20) 2321EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) 2322 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1 2323EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) 2324EXC_COMMON_BEGIN(h_virt_irq_common) 2325 GEN_COMMON h_virt_irq 2326 addi r3,r1,STACK_INT_FRAME_REGS 2327 bl do_IRQ 2328 b interrupt_return_hsrr 2329 2330 2331EXC_REAL_NONE(0xec0, 0x20) 2332EXC_VIRT_NONE(0x4ec0, 0x20) 2333EXC_REAL_NONE(0xee0, 0x20) 2334EXC_VIRT_NONE(0x4ee0, 0x20) 2335 2336 2337/* 2338 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU). 2339 * This is an asynchronous interrupt in response to a PMU exception. 2340 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 2341 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()). 2342 * 2343 * Handling: 2344 * This calls into the perf subsystem. 2345 * 2346 * Like the watchdog soft-nmi, it appears an NMI interrupt to Linux, in that it 2347 * runs under local_irq_disable. However it may be soft-masked in 2348 * powerpc-specific code. 2349 * 2350 * If soft masked, the masked handler will note the pending interrupt for 2351 * replay, and clear MSR[EE] in the interrupted context. 2352 * 2353 * CFAR is not used by perf interrupts so not required. 2354 */ 2355INT_DEFINE_BEGIN(performance_monitor) 2356 IVEC=0xf00 2357 IMASK=IRQS_PMI_DISABLED 2358#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2359 IKVM_REAL=1 2360#endif 2361 ICFAR=0 2362INT_DEFINE_END(performance_monitor) 2363 2364EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20) 2365 GEN_INT_ENTRY performance_monitor, virt=0, ool=1 2366EXC_REAL_END(performance_monitor, 0xf00, 0x20) 2367EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) 2368 GEN_INT_ENTRY performance_monitor, virt=1, ool=1 2369EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) 2370EXC_COMMON_BEGIN(performance_monitor_common) 2371 GEN_COMMON performance_monitor 2372 addi r3,r1,STACK_INT_FRAME_REGS 2373 lbz r4,PACAIRQSOFTMASK(r13) 2374 cmpdi r4,IRQS_ENABLED 2375 bne 1f 2376 bl performance_monitor_exception_async 2377 b interrupt_return_srr 23781: 2379 bl performance_monitor_exception_nmi 2380 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2381 li r9,0 2382 mtmsrd r9,1 2383 2384 kuap_kernel_restore r9, r10 2385 2386 EXCEPTION_RESTORE_REGS hsrr=0 2387 RFI_TO_KERNEL 2388 2389/** 2390 * Interrupt 0xf20 - Vector Unavailable Interrupt. 2391 * This is a synchronous interrupt in response to 2392 * executing a vector (or altivec) instruction with MSR[VEC]=0. 2393 * Similar to FP unavailable. 
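 *
 * The common handler below implements, roughly (illustrative C; the three
 * callees are the real bl targets, the TM test exists only with
 * CONFIG_PPC_TRANSACTIONAL_MEM, and all of this assumes CPU_FTR_ALTIVEC):
 *
 *	if (!user_mode(regs))
 *		altivec_unavailable_exception(regs);	// bad kernel use
 *	else if (MSR_TM_TRANSACTIONAL(regs->msr))
 *		altivec_unavailable_tm(regs);		// TM-aware reload path
 *	else
 *		load_up_altivec();			// lazy VMX register load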
2394 */ 2395INT_DEFINE_BEGIN(altivec_unavailable) 2396 IVEC=0xf20 2397#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2398 IKVM_REAL=1 2399#endif 2400 IMSR_R12=1 2401INT_DEFINE_END(altivec_unavailable) 2402 2403EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2404 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2405EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2406EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2407 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2408EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2409EXC_COMMON_BEGIN(altivec_unavailable_common) 2410 GEN_COMMON altivec_unavailable 2411#ifdef CONFIG_ALTIVEC 2412BEGIN_FTR_SECTION 2413 beq 1f 2414#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2415 BEGIN_FTR_SECTION_NESTED(69) 2416 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2417 * transaction), go do TM stuff 2418 */ 2419 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2420 bne- 2f 2421 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2422#endif 2423 bl load_up_altivec 2424 b fast_interrupt_return_srr 2425#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 24262: /* User process was in a transaction */ 2427 addi r3,r1,STACK_INT_FRAME_REGS 2428 bl altivec_unavailable_tm 2429 b interrupt_return_srr 2430#endif 24311: 2432END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2433#endif 2434 addi r3,r1,STACK_INT_FRAME_REGS 2435 bl altivec_unavailable_exception 2436 b interrupt_return_srr 2437 2438 2439/** 2440 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2441 * This is a synchronous interrupt in response to 2442 * executing a VSX instruction with MSR[VSX]=0. 2443 * Similar to FP unavailable. 2444 */ 2445INT_DEFINE_BEGIN(vsx_unavailable) 2446 IVEC=0xf40 2447#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2448 IKVM_REAL=1 2449#endif 2450 IMSR_R12=1 2451INT_DEFINE_END(vsx_unavailable) 2452 2453EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2454 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2455EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2456EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2457 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2458EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2459EXC_COMMON_BEGIN(vsx_unavailable_common) 2460 GEN_COMMON vsx_unavailable 2461#ifdef CONFIG_VSX 2462BEGIN_FTR_SECTION 2463 beq 1f 2464#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2465 BEGIN_FTR_SECTION_NESTED(69) 2466 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2467 * transaction), go do TM stuff 2468 */ 2469 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2470 bne- 2f 2471 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2472#endif 2473 b load_up_vsx 2474#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 24752: /* User process was in a transaction */ 2476 addi r3,r1,STACK_INT_FRAME_REGS 2477 bl vsx_unavailable_tm 2478 b interrupt_return_srr 2479#endif 24801: 2481END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2482#endif 2483 addi r3,r1,STACK_INT_FRAME_REGS 2484 bl vsx_unavailable_exception 2485 b interrupt_return_srr 2486 2487 2488/** 2489 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2490 * This is a synchronous interrupt in response to 2491 * executing an instruction without access to the facility that can be 2492 * resolved by the OS (e.g., FSCR, MSR). 2493 * Similar to FP unavailable. 
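 *
 * The C handler identifies the offending facility from the FSCR "interrupt
 * cause" field (HFSCR for the hypervisor variant below). Roughly, as an
 * illustration of the lookup only:
 *
 *	u64 fscr = mfspr(SPRN_FSCR);	// SPRN_HFSCR for the 0xf80 variant
 *	u8 cause = fscr >> 56;		// IC field: which facility trapped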
2494 */
2495INT_DEFINE_BEGIN(facility_unavailable)
2496 IVEC=0xf60
2497#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2498 IKVM_REAL=1
2499#endif
2500INT_DEFINE_END(facility_unavailable)
2501
2502EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2503 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2504EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2505EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2506 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2507EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2508EXC_COMMON_BEGIN(facility_unavailable_common)
2509 GEN_COMMON facility_unavailable
2510 addi r3,r1,STACK_INT_FRAME_REGS
2511 bl facility_unavailable_exception
2512 HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
2513 b interrupt_return_srr
2514
2515
2516/**
2517 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2518 * This is a synchronous interrupt in response to
2519 * executing an instruction without access to the facility that can only
2520 * be resolved in HV mode (e.g., HFSCR).
2521 * Similar to FP unavailable.
2522 */
2523INT_DEFINE_BEGIN(h_facility_unavailable)
2524 IVEC=0xf80
2525 IHSRR=1
2526 IKVM_REAL=1
2527 IKVM_VIRT=1
2528INT_DEFINE_END(h_facility_unavailable)
2529
2530EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2531 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2532EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2533EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2534 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2535EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2536EXC_COMMON_BEGIN(h_facility_unavailable_common)
2537 GEN_COMMON h_facility_unavailable
2538 addi r3,r1,STACK_INT_FRAME_REGS
2539 bl facility_unavailable_exception
2540 /* XXX Shouldn't be necessary in practice */
2541 HANDLER_RESTORE_NVGPRS()
2542 b interrupt_return_hsrr
2543
2544
2545EXC_REAL_NONE(0xfa0, 0x20)
2546EXC_VIRT_NONE(0x4fa0, 0x20)
2547EXC_REAL_NONE(0xfc0, 0x20)
2548EXC_VIRT_NONE(0x4fc0, 0x20)
2549EXC_REAL_NONE(0xfe0, 0x20)
2550EXC_VIRT_NONE(0x4fe0, 0x20)
2551
2552EXC_REAL_NONE(0x1000, 0x100)
2553EXC_VIRT_NONE(0x5000, 0x100)
2554EXC_REAL_NONE(0x1100, 0x100)
2555EXC_VIRT_NONE(0x5100, 0x100)
2556
2557#ifdef CONFIG_CBE_RAS
2558INT_DEFINE_BEGIN(cbe_system_error)
2559 IVEC=0x1200
2560 IHSRR=1
2561INT_DEFINE_END(cbe_system_error)
2562
2563EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2564 GEN_INT_ENTRY cbe_system_error, virt=0
2565EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2566EXC_VIRT_NONE(0x5200, 0x100)
2567EXC_COMMON_BEGIN(cbe_system_error_common)
2568 GEN_COMMON cbe_system_error
2569 addi r3,r1,STACK_INT_FRAME_REGS
2570 bl cbe_system_error_exception
2571 b interrupt_return_hsrr
2572
2573#else /* CONFIG_CBE_RAS */
2574EXC_REAL_NONE(0x1200, 0x100)
2575EXC_VIRT_NONE(0x5200, 0x100)
2576#endif
2577
2578/**
2579 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
2580 * This was removed from the ISA before 2.01, which is the earliest
2581 * 64-bit BookS ISA supported; however, the G5 / 970 implements this
2582 * interrupt with a non-architected feature available through the support
2583 * processor interface.
2584 */ 2585INT_DEFINE_BEGIN(instruction_breakpoint) 2586 IVEC=0x1300 2587#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2588 IKVM_REAL=1 2589#endif 2590INT_DEFINE_END(instruction_breakpoint) 2591 2592EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) 2593 GEN_INT_ENTRY instruction_breakpoint, virt=0 2594EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) 2595EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) 2596 GEN_INT_ENTRY instruction_breakpoint, virt=1 2597EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) 2598EXC_COMMON_BEGIN(instruction_breakpoint_common) 2599 GEN_COMMON instruction_breakpoint 2600 addi r3,r1,STACK_INT_FRAME_REGS 2601 bl instruction_breakpoint_exception 2602 b interrupt_return_srr 2603 2604 2605EXC_REAL_NONE(0x1400, 0x100) 2606EXC_VIRT_NONE(0x5400, 0x100) 2607 2608/** 2609 * Interrupt 0x1500 - Soft Patch Interrupt 2610 * 2611 * Handling: 2612 * This is an implementation specific interrupt which can be used for a 2613 * range of exceptions. 2614 * 2615 * This interrupt handler is unique in that it runs the denormal assist 2616 * code even for guests (and even in guest context) without going to KVM, 2617 * for speed. POWER9 does not raise denorm exceptions, so this special case 2618 * could be phased out in future to reduce special cases. 2619 */ 2620INT_DEFINE_BEGIN(denorm_exception) 2621 IVEC=0x1500 2622 IHSRR=1 2623 IBRANCH_TO_COMMON=0 2624 IKVM_REAL=1 2625INT_DEFINE_END(denorm_exception) 2626 2627EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2628 GEN_INT_ENTRY denorm_exception, virt=0 2629#ifdef CONFIG_PPC_DENORMALISATION 2630 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2631 bne+ denorm_assist 2632#endif 2633 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2634EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2635#ifdef CONFIG_PPC_DENORMALISATION 2636EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2637 GEN_INT_ENTRY denorm_exception, virt=1 2638 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2639 bne+ denorm_assist 2640 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2641EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2642#else 2643EXC_VIRT_NONE(0x5500, 0x100) 2644#endif 2645 2646#ifdef CONFIG_PPC_DENORMALISATION 2647TRAMP_REAL_BEGIN(denorm_assist) 2648BEGIN_FTR_SECTION 2649/* 2650 * To denormalise we need to move a copy of the register to itself. 2651 * For POWER6 do that here for all FP regs. 2652 */ 2653 mfmsr r10 2654 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2655 xori r10,r10,(MSR_FE0|MSR_FE1) 2656 mtmsrd r10 2657 sync 2658 2659 .Lreg=0 2660 .rept 32 2661 fmr .Lreg,.Lreg 2662 .Lreg=.Lreg+1 2663 .endr 2664 2665FTR_SECTION_ELSE 2666/* 2667 * To denormalise we need to move a copy of the register to itself. 2668 * For POWER7 do that here for the first 32 VSX registers only. 2669 */ 2670 mfmsr r10 2671 oris r10,r10,MSR_VSX@h 2672 mtmsrd r10 2673 sync 2674 2675 .Lreg=0 2676 .rept 32 2677 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2678 .Lreg=.Lreg+1 2679 .endr 2680 2681ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2682 2683BEGIN_FTR_SECTION 2684 b denorm_done 2685END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2686/* 2687 * To denormalise we need to move a copy of the register to itself. 
2688 * For POWER8 we need to do that for all 64 VSX registers 2689 */ 2690 .Lreg=32 2691 .rept 32 2692 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2693 .Lreg=.Lreg+1 2694 .endr 2695 2696denorm_done: 2697 mfspr r11,SPRN_HSRR0 2698 subi r11,r11,4 2699 mtspr SPRN_HSRR0,r11 2700 mtcrf 0x80,r9 2701 ld r9,PACA_EXGEN+EX_R9(r13) 2702BEGIN_FTR_SECTION 2703 ld r10,PACA_EXGEN+EX_PPR(r13) 2704 mtspr SPRN_PPR,r10 2705END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2706BEGIN_FTR_SECTION 2707 ld r10,PACA_EXGEN+EX_CFAR(r13) 2708 mtspr SPRN_CFAR,r10 2709END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2710 li r10,0 2711 stb r10,PACAHSRR_VALID(r13) 2712 ld r10,PACA_EXGEN+EX_R10(r13) 2713 ld r11,PACA_EXGEN+EX_R11(r13) 2714 ld r12,PACA_EXGEN+EX_R12(r13) 2715 ld r13,PACA_EXGEN+EX_R13(r13) 2716 HRFI_TO_UNKNOWN 2717 b . 2718#endif 2719 2720EXC_COMMON_BEGIN(denorm_exception_common) 2721 GEN_COMMON denorm_exception 2722 addi r3,r1,STACK_INT_FRAME_REGS 2723 bl unknown_exception 2724 b interrupt_return_hsrr 2725 2726 2727#ifdef CONFIG_CBE_RAS 2728INT_DEFINE_BEGIN(cbe_maintenance) 2729 IVEC=0x1600 2730 IHSRR=1 2731INT_DEFINE_END(cbe_maintenance) 2732 2733EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2734 GEN_INT_ENTRY cbe_maintenance, virt=0 2735EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2736EXC_VIRT_NONE(0x5600, 0x100) 2737EXC_COMMON_BEGIN(cbe_maintenance_common) 2738 GEN_COMMON cbe_maintenance 2739 addi r3,r1,STACK_INT_FRAME_REGS 2740 bl cbe_maintenance_exception 2741 b interrupt_return_hsrr 2742 2743#else /* CONFIG_CBE_RAS */ 2744EXC_REAL_NONE(0x1600, 0x100) 2745EXC_VIRT_NONE(0x5600, 0x100) 2746#endif 2747 2748 2749INT_DEFINE_BEGIN(altivec_assist) 2750 IVEC=0x1700 2751#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2752 IKVM_REAL=1 2753#endif 2754INT_DEFINE_END(altivec_assist) 2755 2756EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2757 GEN_INT_ENTRY altivec_assist, virt=0 2758EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2759EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2760 GEN_INT_ENTRY altivec_assist, virt=1 2761EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2762EXC_COMMON_BEGIN(altivec_assist_common) 2763 GEN_COMMON altivec_assist 2764 addi r3,r1,STACK_INT_FRAME_REGS 2765#ifdef CONFIG_ALTIVEC 2766 bl altivec_assist_exception 2767 HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ 2768#else 2769 bl unknown_exception 2770#endif 2771 b interrupt_return_srr 2772 2773 2774#ifdef CONFIG_CBE_RAS 2775INT_DEFINE_BEGIN(cbe_thermal) 2776 IVEC=0x1800 2777 IHSRR=1 2778INT_DEFINE_END(cbe_thermal) 2779 2780EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2781 GEN_INT_ENTRY cbe_thermal, virt=0 2782EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2783EXC_VIRT_NONE(0x5800, 0x100) 2784EXC_COMMON_BEGIN(cbe_thermal_common) 2785 GEN_COMMON cbe_thermal 2786 addi r3,r1,STACK_INT_FRAME_REGS 2787 bl cbe_thermal_exception 2788 b interrupt_return_hsrr 2789 2790#else /* CONFIG_CBE_RAS */ 2791EXC_REAL_NONE(0x1800, 0x100) 2792EXC_VIRT_NONE(0x5800, 0x100) 2793#endif 2794 2795 2796#ifdef CONFIG_PPC_WATCHDOG 2797 2798INT_DEFINE_BEGIN(soft_nmi) 2799 IVEC=0x900 2800 ISTACK=0 2801 ICFAR=0 2802INT_DEFINE_END(soft_nmi) 2803 2804/* 2805 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2806 * stack is one that is usable by maskable interrupts so long as MSR_EE 2807 * remains off. It is used for recovery when something has corrupted the 2808 * normal kernel stack, for example. 
The "soft NMI" must not use the process 2809 * stack because we want irq disabled sections to avoid touching the stack 2810 * at all (other than PMU interrupts), so use the emergency stack for this, 2811 * and run it entirely with interrupts hard disabled. 2812 */ 2813EXC_COMMON_BEGIN(soft_nmi_common) 2814 mr r10,r1 2815 ld r1,PACAEMERGSP(r13) 2816 subi r1,r1,INT_FRAME_SIZE 2817 __GEN_COMMON_BODY soft_nmi 2818 2819 addi r3,r1,STACK_INT_FRAME_REGS 2820 bl soft_nmi_interrupt 2821 2822 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2823 li r9,0 2824 mtmsrd r9,1 2825 2826 kuap_kernel_restore r9, r10 2827 2828 EXCEPTION_RESTORE_REGS hsrr=0 2829 RFI_TO_KERNEL 2830 2831#endif /* CONFIG_PPC_WATCHDOG */ 2832 2833/* 2834 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2835 * - If it was a decrementer interrupt, we bump the dec to max and return. 2836 * - If it was a doorbell we return immediately since doorbells are edge 2837 * triggered and won't automatically refire. 2838 * - If it was a HMI we return immediately since we handled it in realmode 2839 * and it won't refire. 2840 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2841 * This is called with r10 containing the value to OR to the paca field. 2842 */ 2843.macro MASKED_INTERRUPT hsrr=0 2844 .if \hsrr 2845masked_Hinterrupt: 2846 .else 2847masked_interrupt: 2848 .endif 2849 stw r9,PACA_EXGEN+EX_CCR(r13) 2850#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG 2851 /* 2852 * Ensure there was no previous MUST_HARD_MASK interrupt or 2853 * HARD_DIS setting. If this does fire, the interrupt is still 2854 * masked and MSR[EE] will be cleared on return, so no need to 2855 * panic, but somebody probably enabled MSR[EE] under 2856 * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common 2857 * cause. 2858 */ 2859 lbz r9,PACAIRQHAPPENED(r13) 2860 andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) 28610: tdnei r9,0 2862 EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) 2863#endif 2864 lbz r9,PACAIRQHAPPENED(r13) 2865 or r9,r9,r10 2866 stb r9,PACAIRQHAPPENED(r13) 2867 2868 .if ! \hsrr 2869 cmpwi r10,PACA_IRQ_DEC 2870 bne 1f 2871 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2872 mtspr SPRN_DEC,r9 2873#ifdef CONFIG_PPC_WATCHDOG 2874 lwz r9,PACA_EXGEN+EX_CCR(r13) 2875 b soft_nmi_common 2876#else 2877 b 2f 2878#endif 2879 .endif 2880 28811: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2882 beq 2f 2883 xori r12,r12,MSR_EE /* clear MSR_EE */ 2884 .if \hsrr 2885 mtspr SPRN_HSRR1,r12 2886 .else 2887 mtspr SPRN_SRR1,r12 2888 .endif 2889 ori r9,r9,PACA_IRQ_HARD_DIS 2890 stb r9,PACAIRQHAPPENED(r13) 28912: /* done */ 2892 li r9,0 2893 .if \hsrr 2894 stb r9,PACAHSRR_VALID(r13) 2895 .else 2896 stb r9,PACASRR_VALID(r13) 2897 .endif 2898 2899 SEARCH_RESTART_TABLE 2900 cmpdi r12,0 2901 beq 3f 2902 .if \hsrr 2903 mtspr SPRN_HSRR0,r12 2904 .else 2905 mtspr SPRN_SRR0,r12 2906 .endif 29073: 2908 2909 ld r9,PACA_EXGEN+EX_CTR(r13) 2910 mtctr r9 2911 lwz r9,PACA_EXGEN+EX_CCR(r13) 2912 mtcrf 0x80,r9 2913 std r1,PACAR1(r13) 2914 ld r9,PACA_EXGEN+EX_R9(r13) 2915 ld r10,PACA_EXGEN+EX_R10(r13) 2916 ld r11,PACA_EXGEN+EX_R11(r13) 2917 ld r12,PACA_EXGEN+EX_R12(r13) 2918 ld r13,PACA_EXGEN+EX_R13(r13) 2919 /* May return to masked low address where r13 is not set up */ 2920 .if \hsrr 2921 HRFI_TO_KERNEL 2922 .else 2923 RFI_TO_KERNEL 2924 .endif 2925 b . 
2926.endm 2927 2928TRAMP_REAL_BEGIN(stf_barrier_fallback) 2929 std r9,PACA_EXRFI+EX_R9(r13) 2930 std r10,PACA_EXRFI+EX_R10(r13) 2931 sync 2932 ld r9,PACA_EXRFI+EX_R9(r13) 2933 ld r10,PACA_EXRFI+EX_R10(r13) 2934 ori 31,31,0 2935 .rept 14 2936 b 1f 29371: 2938 .endr 2939 blr 2940 2941/* Clobbers r10, r11, ctr */ 2942.macro L1D_DISPLACEMENT_FLUSH 2943 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2944 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2945 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2946 mtctr r11 2947 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2948 2949 /* order ld/st prior to dcbt stop all streams with flushing */ 2950 sync 2951 2952 /* 2953 * The load addresses are at staggered offsets within cachelines, 2954 * which suits some pipelines better (on others it should not 2955 * hurt). 2956 */ 29571: 2958 ld r11,(0x80 + 8)*0(r10) 2959 ld r11,(0x80 + 8)*1(r10) 2960 ld r11,(0x80 + 8)*2(r10) 2961 ld r11,(0x80 + 8)*3(r10) 2962 ld r11,(0x80 + 8)*4(r10) 2963 ld r11,(0x80 + 8)*5(r10) 2964 ld r11,(0x80 + 8)*6(r10) 2965 ld r11,(0x80 + 8)*7(r10) 2966 addi r10,r10,0x80*8 2967 bdnz 1b 2968.endm 2969 2970TRAMP_REAL_BEGIN(entry_flush_fallback) 2971 std r9,PACA_EXRFI+EX_R9(r13) 2972 std r10,PACA_EXRFI+EX_R10(r13) 2973 std r11,PACA_EXRFI+EX_R11(r13) 2974 mfctr r9 2975 L1D_DISPLACEMENT_FLUSH 2976 mtctr r9 2977 ld r9,PACA_EXRFI+EX_R9(r13) 2978 ld r10,PACA_EXRFI+EX_R10(r13) 2979 ld r11,PACA_EXRFI+EX_R11(r13) 2980 blr 2981 2982/* 2983 * The SCV entry flush happens with interrupts enabled, so it must disable 2984 * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10 2985 * (containing LR) does not need to be preserved here because scv entry 2986 * puts 0 in the pt_regs, CTR can be clobbered for the same reason. 2987 */ 2988TRAMP_REAL_BEGIN(scv_entry_flush_fallback) 2989 li r10,0 2990 mtmsrd r10,1 2991 lbz r10,PACAIRQHAPPENED(r13) 2992 ori r10,r10,PACA_IRQ_HARD_DIS 2993 stb r10,PACAIRQHAPPENED(r13) 2994 std r11,PACA_EXRFI+EX_R11(r13) 2995 L1D_DISPLACEMENT_FLUSH 2996 ld r11,PACA_EXRFI+EX_R11(r13) 2997 li r10,MSR_RI 2998 mtmsrd r10,1 2999 blr 3000 3001TRAMP_REAL_BEGIN(rfi_flush_fallback) 3002 SET_SCRATCH0(r13); 3003 GET_PACA(r13); 3004 std r1,PACA_EXRFI+EX_R12(r13) 3005 ld r1,PACAKSAVE(r13) 3006 std r9,PACA_EXRFI+EX_R9(r13) 3007 std r10,PACA_EXRFI+EX_R10(r13) 3008 std r11,PACA_EXRFI+EX_R11(r13) 3009 mfctr r9 3010 L1D_DISPLACEMENT_FLUSH 3011 mtctr r9 3012 ld r9,PACA_EXRFI+EX_R9(r13) 3013 ld r10,PACA_EXRFI+EX_R10(r13) 3014 ld r11,PACA_EXRFI+EX_R11(r13) 3015 ld r1,PACA_EXRFI+EX_R12(r13) 3016 GET_SCRATCH0(r13); 3017 rfid 3018 3019TRAMP_REAL_BEGIN(hrfi_flush_fallback) 3020 SET_SCRATCH0(r13); 3021 GET_PACA(r13); 3022 std r1,PACA_EXRFI+EX_R12(r13) 3023 ld r1,PACAKSAVE(r13) 3024 std r9,PACA_EXRFI+EX_R9(r13) 3025 std r10,PACA_EXRFI+EX_R10(r13) 3026 std r11,PACA_EXRFI+EX_R11(r13) 3027 mfctr r9 3028 L1D_DISPLACEMENT_FLUSH 3029 mtctr r9 3030 ld r9,PACA_EXRFI+EX_R9(r13) 3031 ld r10,PACA_EXRFI+EX_R10(r13) 3032 ld r11,PACA_EXRFI+EX_R11(r13) 3033 ld r1,PACA_EXRFI+EX_R12(r13) 3034 GET_SCRATCH0(r13); 3035 hrfid 3036 3037TRAMP_REAL_BEGIN(rfscv_flush_fallback) 3038 /* system call volatile */ 3039 mr r7,r13 3040 GET_PACA(r13); 3041 mr r8,r1 3042 ld r1,PACAKSAVE(r13) 3043 mfctr r9 3044 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 3045 ld r11,PACA_L1D_FLUSH_SIZE(r13) 3046 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 3047 mtctr r11 3048 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 3049 3050 /* order ld/st prior to dcbt stop all streams with flushing */ 3051 sync 3052 
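	/*
	 * Each loop iteration below reads 8 cache lines, and the iteration
	 * count set up above is flush_size / (128 * 8), so the loads walk the
	 * whole fallback area once, displacing the prior contents of the L1
	 * data cache.
	 */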
3053 /*
3054 * The load addresses are at staggered offsets within cachelines,
3055 * which suits some pipelines better (on others it should not
3056 * hurt).
3057 */
30581:
3059 ld r11,(0x80 + 8)*0(r10)
3060 ld r11,(0x80 + 8)*1(r10)
3061 ld r11,(0x80 + 8)*2(r10)
3062 ld r11,(0x80 + 8)*3(r10)
3063 ld r11,(0x80 + 8)*4(r10)
3064 ld r11,(0x80 + 8)*5(r10)
3065 ld r11,(0x80 + 8)*6(r10)
3066 ld r11,(0x80 + 8)*7(r10)
3067 addi r10,r10,0x80*8
3068 bdnz 1b
3069
3070 mtctr r9
3071 li r9,0
3072 li r10,0
3073 li r11,0
3074 mr r1,r8
3075 mr r13,r7
3076 RFSCV
3077
3078USE_TEXT_SECTION()
3079
3080#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
3081kvm_interrupt:
3082 /*
3083 * The conditional branch in KVMTEST can't reach all the way, so
3084 * make a stub.
3085 */
3086 b kvmppc_interrupt
3087#endif
3088
3089_GLOBAL(do_uaccess_flush)
3090 UACCESS_FLUSH_FIXUP_SECTION
3091 nop
3092 nop
3093 nop
3094 blr
3095 L1D_DISPLACEMENT_FLUSH
3096 blr
3097_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
3098EXPORT_SYMBOL(do_uaccess_flush)
3099
3100
3101MASKED_INTERRUPT
3102MASKED_INTERRUPT hsrr=1
3103
3104USE_FIXED_SECTION(virt_trampolines)
3105 /*
3106 * All code below __end_soft_masked is treated as soft-masked. If
3107 * any code runs here with MSR[EE]=1, it must then cope with a pending
3108 * soft interrupt being raised (i.e., by ensuring it is replayed).
3109 *
3110 * The __end_interrupts marker must be past the out-of-line (OOL)
3111 * handlers, so that they are copied to real address 0x100 when running
3112 * a relocatable kernel. This ensures they can be reached from the short
3113 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
3114 * directly, without using LOAD_HANDLER().
3115 */
3116 .align 7
3117 .globl __end_interrupts
3118__end_interrupts:
3119DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)
3120
3121CLOSE_FIXED_SECTION(real_vectors);
3122CLOSE_FIXED_SECTION(real_trampolines);
3123CLOSE_FIXED_SECTION(virt_vectors);
3124CLOSE_FIXED_SECTION(virt_trampolines);
3125
3126USE_TEXT_SECTION()
3127
3128/* MSR[RI] should be clear because this uses SRR[01] */
3129_GLOBAL(enable_machine_check)
3130 mflr r0
3131 bcl 20,31,$+4
31320: mflr r3
3133 addi r3,r3,(1f - 0b)
3134 mtspr SPRN_SRR0,r3
3135 mfmsr r3
3136 ori r3,r3,MSR_ME
3137 mtspr SPRN_SRR1,r3
3138 RFI_TO_KERNEL
31391: mtlr r0
3140 blr
3141
3142/* MSR[RI] should be clear because this uses SRR[01] */
3143disable_machine_check:
3144 mflr r0
3145 bcl 20,31,$+4
31460: mflr r3
3147 addi r3,r3,(1f - 0b)
3148 mtspr SPRN_SRR0,r3
3149 mfmsr r3
3150 li r4,MSR_ME
3151 andc r3,r3,r4
3152 mtspr SPRN_SRR1,r3
3153 RFI_TO_KERNEL
31541: mtlr r0
3155 blr
3156
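/*
 * Note on the two helpers above: both flip MSR[ME] by placing the target MSR
 * value in SRR1 and returning through rfid (RFI_TO_KERNEL) rather than
 * writing the MSR with mtmsrd. That is also why callers must run with
 * MSR[RI] clear, as the comments say: SRR0/SRR1 are in use here, so an
 * interrupt taken in the middle would not be recoverable.
 */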