/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * The following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN   - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN   - virt, unrelocated helpers (in practice, real can use)
 * EXC_COMMON         - after switching to virtual, relocated mode
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name, text);			\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of a label.
 * Instead we get the base of the kernel from paca->kernelbase and OR in
 * the low part of the label. This requires that the label be within 64KB
 * of kernelbase, and that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l;			\
	addis	reg,reg,(ABS_ADDR(label, section))@h
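/*
 * For illustration, the typical use of LOAD_HANDLER (as in
 * GEN_BRANCH_TO_COMMON below) is to reach a relocated handler from
 * unrelocated code via CTR, e.g. (some_handler is a placeholder name):
 *
 *	LOAD_HANDLER(r10, some_handler)
 *	mtctr	r10
 *	bctr
 *
 * __LOAD_FAR_HANDLER is the same idea for targets beyond 64K of
 * kernelbase, additionally inserting the high 16 bits with addis.
 */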
/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define ICFAR		.L_ICFAR_\name\()	/* Uses CFAR */
#define ICFAR_IF_HVMODE	.L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef ICFAR
		ICFAR=1
	.endif
	.ifndef ICFAR_IF_HVMODE
		ICFAR_IF_HVMODE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
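/*
 * For illustration, an interrupt is then described by setting only the
 * flags that differ from the defaults above, mirroring the real
 * definitions later in this file:
 *
 *	INT_DEFINE_BEGIN(data_access)
 *		IVEC=0x300
 *		IDAR=1
 *		IDSISR=1
 *		IKVM_REAL=1
 *	INT_DEFINE_END(data_access)
 *
 * Everything left unset (IAREA, ISTACK, IKUAP, ...) takes the default
 * assigned by do_define_int.
 */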
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1", switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
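/*
 * For example, the hypervisor decrementer (IVEC=0x980, IHSRR=1) is
 * reported to the KVM handler as trap 0x982, and a 0x500 external
 * interrupt with IHSRR_IF_HVMODE=1 as 0x502 only on CPUs running in HV
 * mode; SRR-based variants keep their plain vector number.
 */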
/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
	.if ICFAR
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(69)
	mfspr	r10,SPRN_CFAR
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(69)
	li	r10,0
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.endif
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm
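/*
 * In summary (matching the register comments at GEN_COMMON below): on
 * exit from GEN_INT_ENTRY, r13 points to the paca, r9 holds the saved
 * CR, r11/r12 hold the saved [H]SRR0/[H]SRR1, and the original r9-r13,
 * CTR, plus any selected CFAR/PPR/DAR/DSISR are stashed in the IAREA
 * save area. r10 is scratch at this point.
 */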
/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm
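/*
 * The naming scheme here is fixed: for an interrupt "name", the macros
 * above emit name_common_real and (if IVIRT) name_common_virt, which are
 * exactly the labels GEN_BRANCH_TO_COMMON targets. IREALMODE_COMMON
 * handlers are instead reached via a plain name_common label and stay in
 * real mode throughout.
 */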
.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	.if ICFAR || ICFAR_IF_HVMODE
	ld	r10,IAREA+EX_CFAR(r13)
	.else
	li	r10,0
	.endif
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_GPRS(3, 8, r1)		/* save r3 - r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
.endm
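/*
 * After __GEN_COMMON_BODY the kernel stack holds a complete pt_regs
 * frame, roughly (a summary of the stores above, not an exhaustive
 * layout):
 *
 *	_NIP/_MSR	[H]SRR0/[H]SRR1
 *	_CCR/_CTR/_LINK/_XER	CR, CTR, LR, XER
 *	GPR0..GPR13 + NVGPRs	original GPRs, recovered from the IAREA
 *	_DAR/_DSISR	fault address/status, if IDAR/IDSISR
 *	ORIG_GPR3	CFAR if the CPU has it, else 0
 *	SOFTE		irq soft-mask state
 *	_TRAP		IVEC, with RESULT cleared to 0
 *
 * so a C handler can take a single pt_regs pointer (r3 = r1 +
 * STACK_FRAME_OVERHEAD, as the callers below do).
 */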
/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm
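/*
 * As the search loops above show, a restart table entry is three
 * doublewords { start, end, fixup }: if start <= r11 < end, the fixup
 * address is returned in r12 (0 if no match). Soft mask table entries
 * are two doublewords { start, end }, and r12 is set to 1 on a match.
 */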
/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. The LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within the first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of the caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:
/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before control leaves the soft-masked text below __end_soft_masked, at
 * least one of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif
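/*
 * Note that each scv entry stub above is eight instructions (0x20 bytes),
 * matching the 0x20 spacing of the architected scv entry points, so that
 * level 0 lands on the real handler and levels 1-127 each hit a stub that
 * forces a SIGILL.
 */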
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - A crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register setup. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to the idle handler. This tests
	 * SRR1 bits 46:47. A non-0 value indicates that we are coming from a
	 * power saving state. The idle wakeup handler initially runs in real
	 * mode, but we branch to the 0xc000... address so we can turn on
	 * relocation with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, then SLB or MCE will be able to recover, but a
	 * nested NMI will notice in_nmi and not recover because of the use of
	 * the NMI stack. in_nmi reentrancy is tested in system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL
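/*
 * A note on the idle wakeup test above: rlwinm. r5,r3,47-31,30,31
 * extracts SRR1 bits 46:47 into the low two bits of r5. 0 means no power
 * saving was in effect; 1 means no state was lost, so the bltlr in
 * system_reset_idle_wake returns straight to the idle caller with
 * r3=SRR1; higher values indicate state loss and take the
 * idle_return_gpr_loss path.
 */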
/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP					\
	/* Clear MSR_RI before setting SRR0 and SRR1. */		\
	li	r9,0;							\
	mtmsrd	r9,1;		/* Clear MSR_RI */			\
	/* Decrement paca->in_mce now RI is clear. */			\
	lhz	r12,PACA_IN_MCE(r13);					\
	subi	r12,r12,1;						\
	sth	r12,PACA_IN_MCE(r13);					\
	EXCEPTION_RESTORE_REGS
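/*
 * Roadmap for machine_check_early_common below, summarising the branches
 * it takes after running the early handler: idle wakeup goes to
 * machine_check_idle_common; coming from a guest or from userspace goes
 * to mce_deliver (wind up, then re-enter via the machine_check "late"
 * handler); a recoverable kernel MCE queues the event and returns;
 * anything unrecoverable goes to unrecoverable_mce.
 */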
EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to the mc_emergency stack and handle re-entrancy (we limit
	 * nested MCEs up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or a
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set the stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to a
	 * stack frame on the mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if the thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from the error; if
	 * not then stay on the emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL
mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there is a chance that we might get hit by
	 * another MCE during the panic path and run into an unstable state
	 * with no way out. Hence, turn the ME bit off while going down, so
	 * that when another MCE is hit during the panic path, the system will
	 * checkstop and the hypervisor will get restarted cleanly by the SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print the MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.
/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr
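/*
 * A note on the andis. idiom above: DSISR_DABRMATCH sits in the upper
 * half of the 32-bit DSISR, and andis. ANDs with the immediate shifted
 * left 16 while setting CR0, so the DAWR/DABR match bit is tested and
 * the bne- reaches do_break without needing a separate mask register.
 */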
/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault: a missing SLB
 * entry for HPT, or an address outside the RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr
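/*
 * Both SLB handlers above share the same convention: do_slb_fault returns
 * 0 on success, in which case the fault is transparent and the fast
 * return path is taken; any other value is stored in RESULT and handed
 * to do_bad_segment_interrupt to report the access fault.
 */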
/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers; guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
 * overhead, because registers at the time of the interrupt are not so
 * important as it is asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not required because this is an asynchronous interrupt that in
 * general won't have much bearing on the state of the CPU, with the possible
 * exception of crash/debug IPIs, but those are generally moving to use SRESET
 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
 * it may be exiting the guest and need CFAR to be saved.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
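/*
 * Because IHSRR_IF_HVMODE selects HSRR or SRR at entry depending on
 * MSR[HV], the return path above must match: the feature section picks
 * interrupt_return_hsrr on HV-mode CPUs and interrupt_return_srr
 * otherwise, mirroring the entry-side choice.
 */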
/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to a data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	/*
	 * There's a short window during boot where although the kernel is
	 * running little endian, any exceptions will cause the CPU to switch
	 * back to big endian. For example a WARN() boils down to a trap
	 * instruction, which will cause a program check, and we end up here but
	 * with the CPU in big endian mode. The first instruction of the program
	 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
	 * executed in the wrong endian is an lhzu with a ~3GB displacement from
	 * r3. The content of r3 is random, so that is a load from some random
	 * location, and depending on the system can easily lead to a checkstop,
	 * or an infinitely recursive page fault.
	 *
	 * So to handle that case we have a trampoline here that can detect we
	 * are in the wrong endian and flip us back to the correct endian. We
	 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
	 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
	 * SPRG1 is already used for the paca. SPRG3 is user readable, but this
	 * trampoline is only active very early in boot, and SPRG3 will be
	 * reinitialised in vdso_getcpu_init() before userspace starts.
	 *
	 * Each .long below is the byte-swapped encoding of the instruction
	 * named in its comment (e.g., rfid encodes as 0x4c000024, emitted here
	 * as 0x2400004c), so it decodes correctly when fetched in the wrong
	 * endian.
	 */
BEGIN_FTR_SECTION
	tdi   0,0,0x48    // Trap never, or in reverse endian: b . + 8
	b     1f          // Skip trampoline if endian is correct
	.long 0xa643707d  // mtsprg  0, r11      Backup r11
	.long 0xa6027a7d  // mfsrr0  r11
	.long 0xa643727d  // mtsprg  2, r11      Backup SRR0 in SPRG2
	.long 0xa6027b7d  // mfsrr1  r11
	.long 0xa643737d  // mtsprg  3, r11      Backup SRR1 in SPRG3
	.long 0xa600607d  // mfmsr   r11
	.long 0x01006b69  // xori    r11, r11, 1 Invert MSR[LE]
	.long 0xa6037b7d  // mtsrr1  r11
	.long 0x34076039  // li      r11, 0x734
	.long 0xa6037a7d  // mtsrr0  r11
	.long 0x2400004c  // rfid
	mfsprg r11, 3
	mtsrr1 r11        // Restore SRR1
	mfsprg r11, 2
	mtsrr0 r11        // Restore SRR0
	mfsprg r11, 0     // Restore r11
1:
END_FTR_SECTION(0, 1)     // nop out after boot
#endif /* CONFIG_CPU_LITTLE_ENDIAN */

	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	.Lnormal_stack		/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	.Lemergency_stack	/* If TM, emergency */

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
	blt	.Lnormal_stack		/* normal path if not */

	/* Use the emergency stack */
.Lemergency_stack:
	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	__ISTACK(program_check)=0
	__GEN_COMMON_BODY program_check
	b	.Ldo_program_check

.Lnormal_stack:
	__ISTACK(program_check)=1
	__GEN_COMMON_BODY program_check

.Ldo_program_check:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr
 */
INT_DEFINE_BEGIN(fp_unavailable)
        IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
        GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
        GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
        GEN_COMMON fp_unavailable
        bne 1f                          /* if from user, just load it up */
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl kernel_fp_unavailable_exception
0:      trap
        EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
        /*
         * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
         * in a transaction), go do TM stuff.
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne- 2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
        bl load_up_fpu
        b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl fp_unavailable_tm
        b interrupt_return_srr
#endif


/**
 * Interrupt 0x900 - Decrementer Interrupt.
 * This is an asynchronous interrupt in response to a decrementer exception
 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
 * local_irq_disable()).
 *
 * Handling:
 * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
 * in the interrupted context.
 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
 * things back up to run soft_nmi_interrupt as a regular interrupt handler
 * on the emergency stack.
 *
 * CFAR is not required because this is asynchronous (see hardware_interrupt).
 * A watchdog interrupt may like to have CFAR, but usually the interesting
 * branch is long gone by that point (e.g., infinite loop).
 */
INT_DEFINE_BEGIN(decrementer)
        IVEC=0x900
        IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
        GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
        GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
        GEN_COMMON decrementer
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl timer_interrupt
        b interrupt_return_srr


/**
 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
 * register.
 *
 * Handling:
 * Linux does not use this outside KVM, where it's used to keep a host timer
 * while the guest is given control of DEC. It should normally be caught by
 * the KVM test and routed there.
 */
INT_DEFINE_BEGIN(hdecrementer)
        IVEC=0x980
        IHSRR=1
        ISTACK=0
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)

EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
        GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
        GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
        __GEN_COMMON_ENTRY hdecrementer
        /*
         * Hypervisor decrementer interrupts not caught by the KVM test
         * shouldn't occur but are sometimes left pending on exit from a KVM
         * guest. We don't need to do anything to clear them, as they are
         * edge-triggered.
         *
         * Be careful to avoid touching the kernel stack.
         */
        li r10,0
        stb r10,PACAHSRR_VALID(r13)
        ld r10,PACA_EXGEN+EX_CTR(r13)
        mtctr r10
        mtcrf 0x80,r9
        ld r9,PACA_EXGEN+EX_R9(r13)
        ld r10,PACA_EXGEN+EX_R10(r13)
        ld r11,PACA_EXGEN+EX_R11(r13)
        ld r12,PACA_EXGEN+EX_R12(r13)
        ld r13,PACA_EXGEN+EX_R13(r13)
        HRFI_TO_KERNEL


/**
 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsndp doorbell.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * Guests may use this for IPIs between threads in a core if the
 * hypervisor supports it. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, leaving MSR[EE] enabled in the interrupted context because the
 * doorbells are edge triggered.
 *
 * CFAR is not required, similarly to hardware_interrupt.
 */
INT_DEFINE_BEGIN(doorbell_super)
        IVEC=0xa00
        IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(doorbell_super)

EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
        GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
        GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
        GEN_COMMON doorbell_super
        addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
        bl doorbell_exception
#else
        bl unknown_async_exception
#endif
        b interrupt_return_srr


EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)

/**
 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
 * This is a synchronous interrupt invoked with the "sc" instruction. The
 * system call is invoked with "sc 0" and does not alter the HV bit, so it
 * is directed to the currently running OS. The hypercall is invoked with
 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Handling:
 * If the KVM test fires then it was due to a hypercall and is accordingly
 * routed to KVM. Otherwise this executes a normal Linux system call.
 *
 * Call convention:
 *
 * The syscall and hypercall register conventions are documented in
 * Documentation/powerpc/syscall64-abi.rst and
 * Documentation/powerpc/papr_hcalls.rst respectively.
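 *
 * As a rough example of the "sc 0" convention (per syscall64-abi.rst):
 * r0 carries the syscall number, r3-r8 carry the arguments, the return
 * value comes back in r3, and cr0.SO indicates failure.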
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is not a good idea to use, as hardware may
 * interpret some bits so it may be costly to change them.
 */
INT_DEFINE_BEGIN(system_call)
        IVEC=0xc00
        IKVM_REAL=1
        IKVM_VIRT=1
        ICFAR=0
INT_DEFINE_END(system_call)

.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * There is a little bit of juggling to get syscall and hcall
         * working well. Save r13 in ctr to avoid using a SPRG scratch
         * register.
         *
         * Userspace syscalls have already saved the PPR, hcalls must save
         * it before setting HMT_MEDIUM.
         */
        mtctr r13
        GET_PACA(r13)
        std r10,PACA_EXGEN+EX_R10(r13)
        INTERRUPT_TO_KERNEL
        KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
        mfctr r9
#else
        mr r9,r13
        GET_PACA(r13)
        INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
        cmpdi r0,0x1ebe
        beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

        /* We reach here with PACA in r13, r13 in r9. */
        mfspr r11,SPRN_SRR0
        mfspr r12,SPRN_SRR1

        HMT_MEDIUM

        .if ! \virt
        __LOAD_HANDLER(r10, system_call_common_real, real_vectors)
        mtctr r10
        bctr
        .else
#ifdef CONFIG_RELOCATABLE
        __LOAD_HANDLER(r10, system_call_common, virt_vectors)
        mtctr r10
        bctr
#else
        b system_call_common
#endif
        .endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
        /* Fast LE/BE switch system call */
1:      mfspr r12,SPRN_SRR1
        xori r12,r12,MSR_LE
        mtspr SPRN_SRR1,r12
        mr r13,r9
        RFI_TO_USER     /* return to userspace */
        b .             /* prevent speculative execution */
#endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
        SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
        SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
        std r9,PACA_EXGEN+EX_R9(r13)
        std r11,PACA_EXGEN+EX_R11(r13)
        std r12,PACA_EXGEN+EX_R12(r13)
        mfcr r9
        mfctr r10
        std r10,PACA_EXGEN+EX_R13(r13)
        li r10,0
        std r10,PACA_EXGEN+EX_CFAR(r13)
        std r10,PACA_EXGEN+EX_CTR(r13)
        /*
         * Save the PPR (on systems that support it) before changing to
         * HMT_MEDIUM. That allows the KVM code to save that value into the
         * guest state (it is the guest's PPR value).
         */
BEGIN_FTR_SECTION
        mfspr r10,SPRN_PPR
        std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        HMT_MEDIUM

#ifdef CONFIG_RELOCATABLE
        /*
         * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
         * outside the head section.
         */
        __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
        mtctr r10
        bctr
#else
        b kvmppc_hcall
#endif
#endif

/**
 * Interrupt 0xd00 - Trace Interrupt.
 * This is a synchronous interrupt in response to instruction step or
 * breakpoint faults.
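 * (Illustratively, it is taken e.g. when executing with MSR[SE]=1 for
 * single step or MSR[BE]=1 for branch trace; the exact causes are
 * described in the ISA.)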
 */
INT_DEFINE_BEGIN(single_step)
        IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)

EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
        GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
        GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
        GEN_COMMON single_step
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl single_step_exception
        b interrupt_return_srr


/**
 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest data access.
 *
 * Handling:
 * This should always get routed to KVM. In radix MMU mode, this is caused
 * by a guest nested radix access that can't be performed due to the
 * partition scope page table. In hash mode, this can be caused by guests
 * running with translation disabled (virtual real mode) or with VPM enabled.
 * KVM will update the page table structures or disallow the access.
 */
INT_DEFINE_BEGIN(h_data_storage)
        IVEC=0xe00
        IHSRR=1
        IDAR=1
        IDSISR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)

EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
        GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
        GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
        GEN_COMMON h_data_storage
        addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
        bl do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
        bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
        b interrupt_return_hsrr


/**
 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest instruction fetch, similar to HDSI.
 */
INT_DEFINE_BEGIN(h_instr_storage)
        IVEC=0xe20
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)

EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
        GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
        GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
        GEN_COMMON h_instr_storage
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl unknown_exception
        b interrupt_return_hsrr


/**
 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
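 * This is a synchronous interrupt, taken when the hardware declines to
 * execute an instruction and asks the hypervisor to emulate it (the
 * instruction image is supplied in the HEIR register, per the ISA).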
 */
INT_DEFINE_BEGIN(emulation_assist)
        IVEC=0xe40
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
        GEN_COMMON emulation_assist
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl emulation_assist_interrupt
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b interrupt_return_hsrr


/**
 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
 * Exception. It is always taken in real mode but uses HSRR registers,
 * unlike SRESET and MCE.
 *
 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * This is a special case; it is handled similarly to machine checks, with an
 * initial real mode handler that is not soft-masked, which attempts to fix
 * the problem, followed by a regular handler which is soft-maskable and
 * reports the problem.
 *
 * The emergency stack is used for the early real mode handler.
 *
 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
 * either use soft-masking for the MCE, or use irq_work for the HMI.
 *
 * KVM:
 * Unlike MCE, this calls into KVM without calling the real mode handler
 * first.
 */
INT_DEFINE_BEGIN(hmi_exception_early)
        IVEC=0xe60
        IHSRR=1
        IREALMODE_COMMON=1
        ISTACK=0
        IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
        IVEC=0xe60
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
        GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
        __GEN_REALMODE_COMMON_ENTRY hmi_exception_early

        mr r10,r1                       /* Save r1 */
        ld r1,PACAEMERGSP(r13)          /* Use emergency stack for realmode */
        subi r1,r1,INT_FRAME_SIZE       /* alloc stack frame */

        __GEN_COMMON_BODY hmi_exception_early

        addi r3,r1,STACK_FRAME_OVERHEAD
        bl hmi_exception_realmode
        cmpdi cr0,r3,0
        bne 1f

        EXCEPTION_RESTORE_REGS hsrr=1
        HRFI_TO_USER_OR_KERNEL

1:
        /*
         * Go to virtual mode and pull the HMI event information from
         * firmware.
         */
        EXCEPTION_RESTORE_REGS hsrr=1
        GEN_INT_ENTRY hmi_exception, virt=0

EXC_COMMON_BEGIN(hmi_exception_common)
        GEN_COMMON hmi_exception
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl handle_hmi_exception
        b interrupt_return_hsrr


/**
 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsnd doorbell.
 * Similar to the 0xa00 doorbell but for host rather than guest.
 *
 * CFAR is not required (similar to doorbell_interrupt), unless KVM HV
 * is enabled, in which case it may be a guest exit.
 * Most PowerNV kernels include KVM support, so it would be nice if this
 * could be dynamically patched out when KVM is not running any guests.
 */
INT_DEFINE_BEGIN(h_doorbell)
        IVEC=0xe80
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
        GEN_COMMON h_doorbell
        addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
        bl doorbell_exception
#else
        bl unknown_async_exception
#endif
        b interrupt_return_hsrr


/**
 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
 * This is an asynchronous interrupt in response to an "external exception".
 * Similar to 0x500 but for host only.
 *
 * Like h_doorbell, CFAR is only required for KVM HV because this can be
 * a guest exit.
 */
INT_DEFINE_BEGIN(h_virt_irq)
        IVEC=0xea0
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
        GEN_COMMON h_virt_irq
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl do_IRQ
        b interrupt_return_hsrr


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


/*
 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
 * This is an asynchronous interrupt in response to a PMU exception.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
 *
 * Handling:
 * This calls into the perf subsystem.
 *
 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in
 * that it runs under local_irq_disable. However it may be soft-masked in
 * powerpc-specific code.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not used by perf interrupts so not required.
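 * (The PMI-specific soft mask means perf can still profile sections that
 * run under local_irq_disable(); only PMI-level masking, e.g. the
 * powerpc_local_irq_pmu_save() style helpers, holds these interrupts off.)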
 */
INT_DEFINE_BEGIN(performance_monitor)
        IVEC=0xf00
        IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
        GEN_COMMON performance_monitor
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl performance_monitor_exception
        b interrupt_return_srr


/**
 * Interrupt 0xf20 - Vector Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a vector (or altivec) instruction with MSR[VEC]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(altivec_unavailable)
        IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_unavailable)

EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
        GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
        GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
        GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /*
         * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
         * in a transaction), go do TM stuff.
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne- 2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        bl load_up_altivec
        b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl altivec_unavailable_tm
        b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl altivec_unavailable_exception
        b interrupt_return_srr


/**
 * Interrupt 0xf40 - VSX Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a VSX instruction with MSR[VSX]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(vsx_unavailable)
        IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(vsx_unavailable)

EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
        GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
        GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
        GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /*
         * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
         * in a transaction), go do TM stuff.
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne- 2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl vsx_unavailable_tm
        b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl vsx_unavailable_exception
        b interrupt_return_srr


/**
 * Interrupt 0xf60 - Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can be
 * resolved by the OS (e.g., FSCR, MSR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(facility_unavailable)
        IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
        GEN_COMMON facility_unavailable
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl facility_unavailable_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b interrupt_return_srr


/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can only
 * be resolved in HV mode (e.g., HFSCR).
 * Similar to FP unavailable.
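 * (The interrupt cause field of the HFSCR identifies which facility
 * triggered the interrupt, as FSCR does for the 0xf60 case.)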
 */
INT_DEFINE_BEGIN(h_facility_unavailable)
        IVEC=0xf80
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
        GEN_COMMON h_facility_unavailable
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl facility_unavailable_exception
        REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
        b interrupt_return_hsrr


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
        IVEC=0x1200
        IHSRR=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
        GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
        GEN_COMMON cbe_system_error
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl cbe_system_error_exception
        b interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif

/**
 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
 * This was removed from the ISA before 2.01, which is the earliest
 * 64-bit BookS ISA supported; however the G5 / 970 implements this
 * interrupt with a non-architected feature available through the support
 * processor interface.
 */
INT_DEFINE_BEGIN(instruction_breakpoint)
        IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
        GEN_COMMON instruction_breakpoint
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl instruction_breakpoint_exception
        b interrupt_return_srr


EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

/**
 * Interrupt 0x1500 - Soft Patch Interrupt
 *
 * Handling:
 * This is an implementation specific interrupt which can be used for a
 * range of exceptions.
 *
 * This interrupt handler is unique in that it runs the denormal assist
 * code even for guests (and even in guest context) without going to KVM,
 * for speed. POWER9 does not raise denorm exceptions, so this special case
 * could be phased out in future to reduce special cases.
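 *
 * The assist path below cycles each affected register through itself and
 * then rewinds HSRR0 by one instruction, so the faulting instruction is
 * retried once its operands are no longer denormal.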
 */
INT_DEFINE_BEGIN(denorm_exception)
        IVEC=0x1500
        IHSRR=1
        IBRANCH_TO_COMMON=0
        IKVM_REAL=1
INT_DEFINE_END(denorm_exception)

EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
        andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+ denorm_assist
#endif
        GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=1
        andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+ denorm_assist
        GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
        mfmsr r10
        ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
        xori r10,r10,(MSR_FE0|MSR_FE1)
        mtmsrd r10
        sync

        .Lreg=0
        .rept 32
        fmr .Lreg,.Lreg
        .Lreg=.Lreg+1
        .endr

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
        mfmsr r10
        oris r10,r10,MSR_VSX@h
        mtmsrd r10
        sync

        .Lreg=0
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
        b denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers.
 */
        .Lreg=32
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

denorm_done:
        mfspr r11,SPRN_HSRR0
        subi r11,r11,4
        mtspr SPRN_HSRR0,r11
        mtcrf 0x80,r9
        ld r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
        ld r10,PACA_EXGEN+EX_PPR(r13)
        mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
        ld r10,PACA_EXGEN+EX_CFAR(r13)
        mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        li r10,0
        stb r10,PACAHSRR_VALID(r13)
        ld r10,PACA_EXGEN+EX_R10(r13)
        ld r11,PACA_EXGEN+EX_R11(r13)
        ld r12,PACA_EXGEN+EX_R12(r13)
        ld r13,PACA_EXGEN+EX_R13(r13)
        HRFI_TO_UNKNOWN
        b .
#endif

EXC_COMMON_BEGIN(denorm_exception_common)
        GEN_COMMON denorm_exception
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl unknown_exception
        b interrupt_return_hsrr


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
        IVEC=0x1600
        IHSRR=1
INT_DEFINE_END(cbe_maintenance)

EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
        GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
        GEN_COMMON cbe_maintenance
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl cbe_maintenance_exception
        b interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


INT_DEFINE_BEGIN(altivec_assist)
        IVEC=0x1700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_assist)

EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
        GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
        GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
        GEN_COMMON altivec_assist
        addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
        bl altivec_assist_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
        bl unknown_exception
#endif
        b interrupt_return_srr


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
        IVEC=0x1800
        IHSRR=1
INT_DEFINE_END(cbe_thermal)

EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
        GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
        GEN_COMMON cbe_thermal
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl cbe_thermal_exception
        b interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif


#ifdef CONFIG_PPC_WATCHDOG

INT_DEFINE_BEGIN(soft_nmi)
        IVEC=0x900
        ISTACK=0
        ICFAR=0
INT_DEFINE_END(soft_nmi)

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
        mr r10,r1
        ld r1,PACAEMERGSP(r13)
        subi r1,r1,INT_FRAME_SIZE
        __GEN_COMMON_BODY soft_nmi

        addi r3,r1,STACK_FRAME_OVERHEAD
        bl soft_nmi_interrupt

        /* Clear MSR_RI before setting SRR0 and SRR1. */
        li r9,0
        mtmsrd r9,1

        kuap_kernel_restore r9, r10

        EXCEPTION_RESTORE_REGS hsrr=0
        RFI_TO_KERNEL

#endif /* CONFIG_PPC_WATCHDOG */

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
.macro MASKED_INTERRUPT hsrr=0
        .if \hsrr
masked_Hinterrupt:
        .else
masked_interrupt:
        .endif
        stw r9,PACA_EXGEN+EX_CCR(r13)
        lbz r9,PACAIRQHAPPENED(r13)
        or r9,r9,r10
        stb r9,PACAIRQHAPPENED(r13)

        .if ! \hsrr
        cmpwi r10,PACA_IRQ_DEC
        bne 1f
        LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
        mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
        lwz r9,PACA_EXGEN+EX_CCR(r13)
        b soft_nmi_common
#else
        b 2f
#endif
        .endif

1:      andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
        beq 2f
        xori r12,r12,MSR_EE     /* clear MSR_EE */
        .if \hsrr
        mtspr SPRN_HSRR1,r12
        .else
        mtspr SPRN_SRR1,r12
        .endif
        ori r9,r9,PACA_IRQ_HARD_DIS
        stb r9,PACAIRQHAPPENED(r13)
2:      /* done */
        li r9,0
        .if \hsrr
        stb r9,PACAHSRR_VALID(r13)
        .else
        stb r9,PACASRR_VALID(r13)
        .endif

        SEARCH_RESTART_TABLE
        cmpdi r12,0
        beq 3f
        .if \hsrr
        mtspr SPRN_HSRR0,r12
        .else
        mtspr SPRN_SRR0,r12
        .endif
3:

        ld r9,PACA_EXGEN+EX_CTR(r13)
        mtctr r9
        lwz r9,PACA_EXGEN+EX_CCR(r13)
        mtcrf 0x80,r9
        std r1,PACAR1(r13)
        ld r9,PACA_EXGEN+EX_R9(r13)
        ld r10,PACA_EXGEN+EX_R10(r13)
        ld r11,PACA_EXGEN+EX_R11(r13)
        ld r12,PACA_EXGEN+EX_R12(r13)
        ld r13,PACA_EXGEN+EX_R13(r13)
        /* May return to masked low address where r13 is not set up */
        .if \hsrr
        HRFI_TO_KERNEL
        .else
        RFI_TO_KERNEL
        .endif
        b .
.endm

TRAMP_REAL_BEGIN(stf_barrier_fallback)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        sync
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ori 31,31,0
        .rept 14
        b 1f
1:
        .endr
        blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
        ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
         */
1:
        ld r11,(0x80 + 8)*0(r10)
        ld r11,(0x80 + 8)*1(r10)
        ld r11,(0x80 + 8)*2(r10)
        ld r11,(0x80 + 8)*3(r10)
        ld r11,(0x80 + 8)*4(r10)
        ld r11,(0x80 + 8)*5(r10)
        ld r11,(0x80 + 8)*6(r10)
        ld r11,(0x80 + 8)*7(r10)
        addi r10,r10,0x80*8
        bdnz 1b
.endm

TRAMP_REAL_BEGIN(entry_flush_fallback)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        blr

/*
 * The SCV entry flush happens with interrupts enabled, so it must disable
 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
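 * Hard-disabling here means clearing MSR[EE] and MSR[RI] with the mtmsrd
 * below and recording PACA_IRQ_HARD_DIS so the soft-mask state stays
 * consistent; MSR[RI] is restored once EXRFI is no longer live.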
 * r10 (containing LR) does not need to be preserved here because scv entry
 * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
 */
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
        li r10,0
        mtmsrd r10,1
        lbz r10,PACAIRQHAPPENED(r13)
        ori r10,r10,PACA_IRQ_HARD_DIS
        stb r10,PACAIRQHAPPENED(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        L1D_DISPLACEMENT_FLUSH
        ld r11,PACA_EXRFI+EX_R11(r13)
        li r10,MSR_RI
        mtmsrd r10,1
        blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std r1,PACA_EXRFI+EX_R12(r13)
        ld r1,PACAKSAVE(r13)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        ld r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std r1,PACA_EXRFI+EX_R12(r13)
        ld r1,PACAKSAVE(r13)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        ld r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        hrfid

TRAMP_REAL_BEGIN(rfscv_flush_fallback)
        /* system call volatile */
        mr r7,r13
        GET_PACA(r13);
        mr r8,r1
        ld r1,PACAKSAVE(r13)
        mfctr r9
        ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
         */
1:
        ld r11,(0x80 + 8)*0(r10)
        ld r11,(0x80 + 8)*1(r10)
        ld r11,(0x80 + 8)*2(r10)
        ld r11,(0x80 + 8)*3(r10)
        ld r11,(0x80 + 8)*4(r10)
        ld r11,(0x80 + 8)*5(r10)
        ld r11,(0x80 + 8)*6(r10)
        ld r11,(0x80 + 8)*7(r10)
        addi r10,r10,0x80*8
        bdnz 1b

        mtctr r9
        li r9,0
        li r10,0
        li r11,0
        mr r1,r8
        mr r13,r7
        RFSCV

USE_TEXT_SECTION()

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvm_interrupt:
        /*
         * The conditional branch in KVMTEST can't reach all the way,
         * so make a stub.
         */
        b kvmppc_interrupt
#endif

_GLOBAL(do_uaccess_flush)
        UACCESS_FLUSH_FIXUP_SECTION
        nop
        nop
        nop
        blr
        L1D_DISPLACEMENT_FLUSH
        blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)


MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

/*
 * Relocation-on interrupts: A subset of the interrupts can be delivered
 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
 * it. Addresses are the same as the original interrupt addresses, but
 * offset by 0xc000000000004000. It's impossible to receive interrupts
 * below 0x300 via this mechanism.
 * KVM: None of these traps are from the guest; anything that escalated
 * to HV=1 from HV=0 is delivered via real mode handlers.
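 * For example, a decrementer taken with relocation on vectors to
 * 0xc000000000004900 rather than 0x900.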
 */

/*
 * This uses the standard macro, since the original 0x300 vector
 * only has extra guff for STAB-based processors -- which never
 * come here.
 */

USE_FIXED_SECTION(virt_trampolines)
        /*
         * All code below __end_soft_masked is treated as soft-masked. If
         * any code runs here with MSR[EE]=1, it must then cope with pending
         * soft interrupt being raised (i.e., by ensuring it is replayed).
         *
         * The __end_interrupts marker must be past the out-of-line (OOL)
         * handlers, so that they are copied to real address 0x100 when running
         * a relocatable kernel. This ensures they can be reached from the
         * short trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
         * directly, without using LOAD_HANDLER().
         */
        .align 7
        .globl __end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/* MSR[RI] should be clear because this uses SRR[01] */
enable_machine_check:
        mflr r0
        bcl 20,31,$+4
0:      mflr r3
        addi r3,r3,(1f - 0b)
        mtspr SPRN_SRR0,r3
        mfmsr r3
        ori r3,r3,MSR_ME
        mtspr SPRN_SRR1,r3
        RFI_TO_KERNEL
1:      mtlr r0
        blr

/* MSR[RI] should be clear because this uses SRR[01] */
disable_machine_check:
        mflr r0
        bcl 20,31,$+4
0:      mflr r3
        addi r3,r3,(1f - 0b)
        mtspr SPRN_SRR0,r3
        mfmsr r3
        li r4,MSR_ME
        andc r3,r3,r4
        mtspr SPRN_SRR1,r3
        RFI_TO_KERNEL
1:      mtlr r0
        blr
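/*
 * Note on the "bcl 20,31,$+4" sequence used above: it is the standard
 * idiom for reading the current PC into LR without unbalancing the link
 * stack predictor, so a position-independent return address can be
 * computed for the rfid.
 */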