1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * This file contains the 64-bit "server" PowerPC variant 4 * of the low level exception handling including exception 5 * vectors, exception return, part of the slb and stab 6 * handling and other fixed offset specific things. 7 * 8 * This file is meant to be #included from head_64.S due to 9 * position dependent assembly. 10 * 11 * Most of this originates from head_64.S and thus has the same 12 * copyright history. 13 * 14 */ 15 16#include <asm/hw_irq.h> 17#include <asm/exception-64s.h> 18#include <asm/ptrace.h> 19#include <asm/cpuidle.h> 20#include <asm/head-64.h> 21#include <asm/feature-fixups.h> 22#include <asm/kup.h> 23 24/* 25 * Following are fixed section helper macros. 26 * 27 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors 28 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors 29 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these) 30 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use) 31 * EXC_COMMON - After switching to virtual, relocated mode. 32 */ 33 34#define EXC_REAL_BEGIN(name, start, size) \ 35 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 36 37#define EXC_REAL_END(name, start, size) \ 38 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 39 40#define EXC_VIRT_BEGIN(name, start, size) \ 41 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 42 43#define EXC_VIRT_END(name, start, size) \ 44 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 45 46#define EXC_COMMON_BEGIN(name) \ 47 USE_TEXT_SECTION(); \ 48 .balign IFETCH_ALIGN_BYTES; \ 49 .global name; \ 50 _ASM_NOKPROBE_SYMBOL(name); \ 51 DEFINE_FIXED_SYMBOL(name, text); \ 52name: 53 54#define TRAMP_REAL_BEGIN(name) \ 55 FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) 56 57#define TRAMP_VIRT_BEGIN(name) \ 58 FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) 59 60#define EXC_REAL_NONE(start, size) \ 61 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ 62 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) 63 64#define EXC_VIRT_NONE(start, size) \ 65 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ 66 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) 67 68/* 69 * We're short on space and time in the exception prolog, so we can't 70 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label. 71 * Instead we get the base of the kernel from paca->kernelbase and or in the low 72 * part of label. This requires that the label be within 64KB of kernelbase, and 73 * that kernelbase be 64K aligned. 74 */ 75#define LOAD_HANDLER(reg, label) \ 76 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 77 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label) 78 79#define __LOAD_HANDLER(reg, label, section) \ 80 ld reg,PACAKBASE(r13); \ 81 ori reg,reg,(ABS_ADDR(label, section))@l 82 83/* 84 * Branches from unrelocated code (e.g., interrupts) to labels outside 85 * head-y require >64K offsets. 86 */ 87#define __LOAD_FAR_HANDLER(reg, label, section) \ 88 ld reg,PACAKBASE(r13); \ 89 ori reg,reg,(ABS_ADDR(label, section))@l; \ 90 addis reg,reg,(ABS_ADDR(label, section))@h 91 92/* 93 * Interrupt code generation macros 94 */ 95#define IVEC .L_IVEC_\name\() /* Interrupt vector address */ 96#define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */ 97#define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */ 98#define IAREA .L_IAREA_\name\() /* PACA save area */ 99#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */ 100#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */ 101#define ICFAR .L_ICFAR_\name\() /* Uses CFAR */ 102#define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */ 103#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */ 104#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */ 105#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */ 106#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */ 107#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */ 108#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */ 109#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name 110#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ 111#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ 112#define __ISTACK(name) .L_ISTACK_ ## name 113#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ 114 115#define INT_DEFINE_BEGIN(n) \ 116.macro int_define_ ## n name 117 118#define INT_DEFINE_END(n) \ 119.endm ; \ 120int_define_ ## n n ; \ 121do_define_int n 122 123.macro do_define_int name 124 .ifndef IVEC 125 .error "IVEC not defined" 126 .endif 127 .ifndef IHSRR 128 IHSRR=0 129 .endif 130 .ifndef IHSRR_IF_HVMODE 131 IHSRR_IF_HVMODE=0 132 .endif 133 .ifndef IAREA 134 IAREA=PACA_EXGEN 135 .endif 136 .ifndef IVIRT 137 IVIRT=1 138 .endif 139 .ifndef IISIDE 140 IISIDE=0 141 .endif 142 .ifndef ICFAR 143 ICFAR=1 144 .endif 145 .ifndef ICFAR_IF_HVMODE 146 ICFAR_IF_HVMODE=0 147 .endif 148 .ifndef IDAR 149 IDAR=0 150 .endif 151 .ifndef IDSISR 152 IDSISR=0 153 .endif 154 .ifndef IBRANCH_TO_COMMON 155 IBRANCH_TO_COMMON=1 156 .endif 157 .ifndef IREALMODE_COMMON 158 IREALMODE_COMMON=0 159 .else 160 .if ! IBRANCH_TO_COMMON 161 .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0" 162 .endif 163 .endif 164 .ifndef IMASK 165 IMASK=0 166 .endif 167 .ifndef IKVM_REAL 168 IKVM_REAL=0 169 .endif 170 .ifndef IKVM_VIRT 171 IKVM_VIRT=0 172 .endif 173 .ifndef ISTACK 174 ISTACK=1 175 .endif 176 .ifndef IKUAP 177 IKUAP=1 178 .endif 179.endm 180 181/* 182 * All interrupts which set HSRR registers, as well as SRESET and MCE and 183 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken, 184 * so they all generally need to test whether they were taken in guest context. 185 * 186 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be 187 * taken with MSR[HV]=0. 188 * 189 * Interrupts which set SRR registers (with the above exceptions) do not 190 * elevate to MSR[HV]=1 mode, though most can be taken when running with 191 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do 192 * not need to test whether a guest is running because they get delivered to 193 * the guest directly, including nested HV KVM guests. 194 * 195 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host 196 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the 197 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be 198 * delivered to the real-mode entry point, therefore such interrupts only test 199 * KVM in their real mode handlers, and only when PR KVM is possible. 200 * 201 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always 202 * delivered in real-mode when the MMU is in hash mode because the MMU 203 * registers are not set appropriately to translate host addresses. In nested 204 * radix mode these can be delivered in virt-mode as the host translations are 205 * used implicitly (see: effective LPID, effective PID). 206 */ 207 208/* 209 * If an interrupt is taken while a guest is running, it is immediately routed 210 * to KVM to handle. 211 */ 212 213.macro KVMTEST name handler 214#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 215 lbz r10,HSTATE_IN_GUEST(r13) 216 cmpwi r10,0 217 /* HSRR variants have the 0x2 bit added to their trap number */ 218 .if IHSRR_IF_HVMODE 219 BEGIN_FTR_SECTION 220 li r10,(IVEC + 0x2) 221 FTR_SECTION_ELSE 222 li r10,(IVEC) 223 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 224 .elseif IHSRR 225 li r10,(IVEC + 0x2) 226 .else 227 li r10,(IVEC) 228 .endif 229 bne \handler 230#endif 231.endm 232 233/* 234 * This is the BOOK3S interrupt entry code macro. 235 * 236 * This can result in one of several things happening: 237 * - Branch to the _common handler, relocated, in virtual mode. 238 * These are normal interrupts (synchronous and asynchronous) handled by 239 * the kernel. 240 * - Branch to KVM, relocated but real mode interrupts remain in real mode. 241 * These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by 242 * / intended for host or guest kernel, but KVM must always be involved 243 * because the machine state is set for guest execution. 244 * - Branch to the masked handler, unrelocated. 245 * These occur when maskable asynchronous interrupts are taken with the 246 * irq_soft_mask set. 247 * - Branch to an "early" handler in real mode but relocated. 248 * This is done if early=1. MCE and HMI use these to handle errors in real 249 * mode. 250 * - Fall through and continue executing in real, unrelocated mode. 251 * This is done if early=2. 252 */ 253 254.macro GEN_BRANCH_TO_COMMON name, virt 255 .if IREALMODE_COMMON 256 LOAD_HANDLER(r10, \name\()_common) 257 mtctr r10 258 bctr 259 .else 260 .if \virt 261#ifndef CONFIG_RELOCATABLE 262 b \name\()_common_virt 263#else 264 LOAD_HANDLER(r10, \name\()_common_virt) 265 mtctr r10 266 bctr 267#endif 268 .else 269 LOAD_HANDLER(r10, \name\()_common_real) 270 mtctr r10 271 bctr 272 .endif 273 .endif 274.endm 275 276.macro GEN_INT_ENTRY name, virt, ool=0 277 SET_SCRATCH0(r13) /* save r13 */ 278 GET_PACA(r13) 279 std r9,IAREA+EX_R9(r13) /* save r9 */ 280BEGIN_FTR_SECTION 281 mfspr r9,SPRN_PPR 282END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 283 HMT_MEDIUM 284 std r10,IAREA+EX_R10(r13) /* save r10 - r12 */ 285 .if ICFAR 286BEGIN_FTR_SECTION 287 mfspr r10,SPRN_CFAR 288END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 289 .elseif ICFAR_IF_HVMODE 290BEGIN_FTR_SECTION 291 BEGIN_FTR_SECTION_NESTED(69) 292 mfspr r10,SPRN_CFAR 293 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) 294FTR_SECTION_ELSE 295 BEGIN_FTR_SECTION_NESTED(69) 296 li r10,0 297 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) 298ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 299 .endif 300 .if \ool 301 .if !\virt 302 b tramp_real_\name 303 .pushsection .text 304 TRAMP_REAL_BEGIN(tramp_real_\name) 305 .else 306 b tramp_virt_\name 307 .pushsection .text 308 TRAMP_VIRT_BEGIN(tramp_virt_\name) 309 .endif 310 .endif 311 312BEGIN_FTR_SECTION 313 std r9,IAREA+EX_PPR(r13) 314END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 315 .if ICFAR || ICFAR_IF_HVMODE 316BEGIN_FTR_SECTION 317 std r10,IAREA+EX_CFAR(r13) 318END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 319 .endif 320 INTERRUPT_TO_KERNEL 321 mfctr r10 322 std r10,IAREA+EX_CTR(r13) 323 mfcr r9 324 std r11,IAREA+EX_R11(r13) 325 std r12,IAREA+EX_R12(r13) 326 327 /* 328 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], 329 * because a d-side MCE will clobber those registers so is 330 * not recoverable if they are live. 331 */ 332 GET_SCRATCH0(r10) 333 std r10,IAREA+EX_R13(r13) 334 .if IDAR && !IISIDE 335 .if IHSRR 336 mfspr r10,SPRN_HDAR 337 .else 338 mfspr r10,SPRN_DAR 339 .endif 340 std r10,IAREA+EX_DAR(r13) 341 .endif 342 .if IDSISR && !IISIDE 343 .if IHSRR 344 mfspr r10,SPRN_HDSISR 345 .else 346 mfspr r10,SPRN_DSISR 347 .endif 348 stw r10,IAREA+EX_DSISR(r13) 349 .endif 350 351 .if IHSRR_IF_HVMODE 352 BEGIN_FTR_SECTION 353 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 354 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 355 FTR_SECTION_ELSE 356 mfspr r11,SPRN_SRR0 /* save SRR0 */ 357 mfspr r12,SPRN_SRR1 /* and SRR1 */ 358 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 359 .elseif IHSRR 360 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 361 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 362 .else 363 mfspr r11,SPRN_SRR0 /* save SRR0 */ 364 mfspr r12,SPRN_SRR1 /* and SRR1 */ 365 .endif 366 367 .if IBRANCH_TO_COMMON 368 GEN_BRANCH_TO_COMMON \name \virt 369 .endif 370 371 .if \ool 372 .popsection 373 .endif 374.endm 375 376/* 377 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt 378 * entry, except in the case of the real-mode handlers which require 379 * __GEN_REALMODE_COMMON_ENTRY. 380 * 381 * This switches to virtual mode and sets MSR[RI]. 382 */ 383.macro __GEN_COMMON_ENTRY name 384DEFINE_FIXED_SYMBOL(\name\()_common_real, text) 385\name\()_common_real: 386 .if IKVM_REAL 387 KVMTEST \name kvm_interrupt 388 .endif 389 390 ld r10,PACAKMSR(r13) /* get MSR value for kernel */ 391 /* MSR[RI] is clear iff using SRR regs */ 392 .if IHSRR_IF_HVMODE 393 BEGIN_FTR_SECTION 394 xori r10,r10,MSR_RI 395 END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) 396 .elseif ! IHSRR 397 xori r10,r10,MSR_RI 398 .endif 399 mtmsrd r10 400 401 .if IVIRT 402 .if IKVM_VIRT 403 b 1f /* skip the virt test coming from real */ 404 .endif 405 406 .balign IFETCH_ALIGN_BYTES 407DEFINE_FIXED_SYMBOL(\name\()_common_virt, text) 408\name\()_common_virt: 409 .if IKVM_VIRT 410 KVMTEST \name kvm_interrupt 4111: 412 .endif 413 .endif /* IVIRT */ 414.endm 415 416/* 417 * Don't switch to virt mode. Used for early MCE and HMI handlers that 418 * want to run in real mode. 419 */ 420.macro __GEN_REALMODE_COMMON_ENTRY name 421DEFINE_FIXED_SYMBOL(\name\()_common_real, text) 422\name\()_common_real: 423 .if IKVM_REAL 424 KVMTEST \name kvm_interrupt 425 .endif 426.endm 427 428.macro __GEN_COMMON_BODY name 429 .if IMASK 430 .if ! ISTACK 431 .error "No support for masked interrupt to use custom stack" 432 .endif 433 434 /* If coming from user, skip soft-mask tests. */ 435 andi. r10,r12,MSR_PR 436 bne 3f 437 438 /* 439 * Kernel code running below __end_soft_masked may be 440 * implicitly soft-masked if it is within the regions 441 * in the soft mask table. 442 */ 443 LOAD_HANDLER(r10, __end_soft_masked) 444 cmpld r11,r10 445 bge+ 1f 446 447 /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */ 448 mtctr r12 449 stw r9,PACA_EXGEN+EX_CCR(r13) 450 SEARCH_SOFT_MASK_TABLE 451 cmpdi r12,0 452 mfctr r12 /* Restore r12 to SRR1 */ 453 lwz r9,PACA_EXGEN+EX_CCR(r13) 454 beq 1f /* Not in soft-mask table */ 455 li r10,IMASK 456 b 2f /* In soft-mask table, always mask */ 457 458 /* Test the soft mask state against our interrupt's bit */ 4591: lbz r10,PACAIRQSOFTMASK(r13) 4602: andi. r10,r10,IMASK 461 /* Associate vector numbers with bits in paca->irq_happened */ 462 .if IVEC == 0x500 || IVEC == 0xea0 463 li r10,PACA_IRQ_EE 464 .elseif IVEC == 0x900 465 li r10,PACA_IRQ_DEC 466 .elseif IVEC == 0xa00 || IVEC == 0xe80 467 li r10,PACA_IRQ_DBELL 468 .elseif IVEC == 0xe60 469 li r10,PACA_IRQ_HMI 470 .elseif IVEC == 0xf00 471 li r10,PACA_IRQ_PMI 472 .else 473 .abort "Bad maskable vector" 474 .endif 475 476 .if IHSRR_IF_HVMODE 477 BEGIN_FTR_SECTION 478 bne masked_Hinterrupt 479 FTR_SECTION_ELSE 480 bne masked_interrupt 481 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 482 .elseif IHSRR 483 bne masked_Hinterrupt 484 .else 485 bne masked_interrupt 486 .endif 487 .endif 488 489 .if ISTACK 490 andi. r10,r12,MSR_PR /* See if coming from user */ 4913: mr r10,r1 /* Save r1 */ 492 subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ 493 beq- 100f 494 ld r1,PACAKSAVE(r13) /* kernel stack to use */ 495100: tdgei r1,-INT_FRAME_SIZE /* trap if r1 is in userspace */ 496 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0 497 .endif 498 499 std r9,_CCR(r1) /* save CR in stackframe */ 500 std r11,_NIP(r1) /* save SRR0 in stackframe */ 501 std r12,_MSR(r1) /* save SRR1 in stackframe */ 502 std r10,0(r1) /* make stack chain pointer */ 503 std r0,GPR0(r1) /* save r0 in stackframe */ 504 std r10,GPR1(r1) /* save r1 in stackframe */ 505 506 /* Mark our [H]SRRs valid for return */ 507 li r10,1 508 .if IHSRR_IF_HVMODE 509 BEGIN_FTR_SECTION 510 stb r10,PACAHSRR_VALID(r13) 511 FTR_SECTION_ELSE 512 stb r10,PACASRR_VALID(r13) 513 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 514 .elseif IHSRR 515 stb r10,PACAHSRR_VALID(r13) 516 .else 517 stb r10,PACASRR_VALID(r13) 518 .endif 519 520 .if ISTACK 521 .if IKUAP 522 kuap_save_amr_and_lock r9, r10, cr1, cr0 523 .endif 524 beq 101f /* if from kernel mode */ 525BEGIN_FTR_SECTION 526 ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ 527 std r9,_PPR(r1) 528END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 529101: 530 .else 531 .if IKUAP 532 kuap_save_amr_and_lock r9, r10, cr1 533 .endif 534 .endif 535 536 /* Save original regs values from save area to stack frame. */ 537 ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */ 538 ld r10,IAREA+EX_R10(r13) 539 std r9,GPR9(r1) 540 std r10,GPR10(r1) 541 ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */ 542 ld r10,IAREA+EX_R12(r13) 543 ld r11,IAREA+EX_R13(r13) 544 std r9,GPR11(r1) 545 std r10,GPR12(r1) 546 std r11,GPR13(r1) 547 548 SAVE_NVGPRS(r1) 549 550 .if IDAR 551 .if IISIDE 552 ld r10,_NIP(r1) 553 .else 554 ld r10,IAREA+EX_DAR(r13) 555 .endif 556 std r10,_DAR(r1) 557 .endif 558 559 .if IDSISR 560 .if IISIDE 561 ld r10,_MSR(r1) 562 lis r11,DSISR_SRR1_MATCH_64S@h 563 and r10,r10,r11 564 .else 565 lwz r10,IAREA+EX_DSISR(r13) 566 .endif 567 std r10,_DSISR(r1) 568 .endif 569 570BEGIN_FTR_SECTION 571 .if ICFAR || ICFAR_IF_HVMODE 572 ld r10,IAREA+EX_CFAR(r13) 573 .else 574 li r10,0 575 .endif 576 std r10,ORIG_GPR3(r1) 577END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 578 ld r10,IAREA+EX_CTR(r13) 579 std r10,_CTR(r1) 580 std r2,GPR2(r1) /* save r2 in stackframe */ 581 SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ 582 mflr r9 /* Get LR, later save to stack */ 583 ld r2,PACATOC(r13) /* get kernel TOC into r2 */ 584 std r9,_LINK(r1) 585 lbz r10,PACAIRQSOFTMASK(r13) 586 mfspr r11,SPRN_XER /* save XER in stackframe */ 587 std r10,SOFTE(r1) 588 std r11,_XER(r1) 589 li r9,IVEC 590 std r9,_TRAP(r1) /* set trap number */ 591 li r10,0 592 ld r11,exception_marker@toc(r2) 593 std r10,RESULT(r1) /* clear regs->result */ 594 std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ 595.endm 596 597/* 598 * On entry r13 points to the paca, r9-r13 are saved in the paca, 599 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and 600 * SRR1, and relocation is on. 601 * 602 * If stack=0, then the stack is already set in r1, and r1 is saved in r10. 603 * PPR save and CPU accounting is not done for the !stack case (XXX why not?) 604 */ 605.macro GEN_COMMON name 606 __GEN_COMMON_ENTRY \name 607 __GEN_COMMON_BODY \name 608.endm 609 610.macro SEARCH_RESTART_TABLE 611#ifdef CONFIG_RELOCATABLE 612 mr r12,r2 613 ld r2,PACATOC(r13) 614 LOAD_REG_ADDR(r9, __start___restart_table) 615 LOAD_REG_ADDR(r10, __stop___restart_table) 616 mr r2,r12 617#else 618 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table) 619 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table) 620#endif 621300: 622 cmpd r9,r10 623 beq 302f 624 ld r12,0(r9) 625 cmpld r11,r12 626 blt 301f 627 ld r12,8(r9) 628 cmpld r11,r12 629 bge 301f 630 ld r12,16(r9) 631 b 303f 632301: 633 addi r9,r9,24 634 b 300b 635302: 636 li r12,0 637303: 638.endm 639 640.macro SEARCH_SOFT_MASK_TABLE 641#ifdef CONFIG_RELOCATABLE 642 mr r12,r2 643 ld r2,PACATOC(r13) 644 LOAD_REG_ADDR(r9, __start___soft_mask_table) 645 LOAD_REG_ADDR(r10, __stop___soft_mask_table) 646 mr r2,r12 647#else 648 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table) 649 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table) 650#endif 651300: 652 cmpd r9,r10 653 beq 302f 654 ld r12,0(r9) 655 cmpld r11,r12 656 blt 301f 657 ld r12,8(r9) 658 cmpld r11,r12 659 bge 301f 660 li r12,1 661 b 303f 662301: 663 addi r9,r9,16 664 b 300b 665302: 666 li r12,0 667303: 668.endm 669 670/* 671 * Restore all registers including H/SRR0/1 saved in a stack frame of a 672 * standard exception. 673 */ 674.macro EXCEPTION_RESTORE_REGS hsrr=0 675 /* Move original SRR0 and SRR1 into the respective regs */ 676 ld r9,_MSR(r1) 677 li r10,0 678 .if \hsrr 679 mtspr SPRN_HSRR1,r9 680 stb r10,PACAHSRR_VALID(r13) 681 .else 682 mtspr SPRN_SRR1,r9 683 stb r10,PACASRR_VALID(r13) 684 .endif 685 ld r9,_NIP(r1) 686 .if \hsrr 687 mtspr SPRN_HSRR0,r9 688 .else 689 mtspr SPRN_SRR0,r9 690 .endif 691 ld r9,_CTR(r1) 692 mtctr r9 693 ld r9,_XER(r1) 694 mtxer r9 695 ld r9,_LINK(r1) 696 mtlr r9 697 ld r9,_CCR(r1) 698 mtcr r9 699 REST_GPRS(2, 13, r1) 700 REST_GPR(0, r1) 701 /* restore original r1. */ 702 ld r1,GPR1(r1) 703.endm 704 705/* 706 * There are a few constraints to be concerned with. 707 * - Real mode exceptions code/data must be located at their physical location. 708 * - Virtual mode exceptions must be mapped at their 0xc000... location. 709 * - Fixed location code must not call directly beyond the __end_interrupts 710 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence 711 * must be used. 712 * - LOAD_HANDLER targets must be within first 64K of physical 0 / 713 * virtual 0xc00... 714 * - Conditional branch targets must be within +/-32K of caller. 715 * 716 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and 717 * therefore don't have to run in physically located code or rfid to 718 * virtual mode kernel code. However on relocatable kernels they do have 719 * to branch to KERNELBASE offset because the rest of the kernel (outside 720 * the exception vectors) may be located elsewhere. 721 * 722 * Virtual exceptions correspond with physical, except their entry points 723 * are offset by 0xc000000000000000 and also tend to get an added 0x4000 724 * offset applied. Virtual exceptions are enabled with the Alternate 725 * Interrupt Location (AIL) bit set in the LPCR. However this does not 726 * guarantee they will be delivered virtually. Some conditions (see the ISA) 727 * cause exceptions to be delivered in real mode. 728 * 729 * The scv instructions are a special case. They get a 0x3000 offset applied. 730 * scv exceptions have unique reentrancy properties, see below. 731 * 732 * It's impossible to receive interrupts below 0x300 via AIL. 733 * 734 * KVM: None of the virtual exceptions are from the guest. Anything that 735 * escalated to HV=1 from HV=0 is delivered via real mode handlers. 736 * 737 * 738 * We layout physical memory as follows: 739 * 0x0000 - 0x00ff : Secondary processor spin code 740 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors 741 * 0x1900 - 0x2fff : Real mode trampolines 742 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors 743 * 0x5900 - 0x6fff : Relon mode trampolines 744 * 0x7000 - 0x7fff : FWNMI data area 745 * 0x8000 - .... : Common interrupt handlers, remaining early 746 * setup code, rest of kernel. 747 * 748 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space 749 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE 750 * vectors there. 751 */ 752OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900) 753OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000) 754OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900) 755OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000) 756 757#ifdef CONFIG_PPC_POWERNV 758 .globl start_real_trampolines 759 .globl end_real_trampolines 760 .globl start_virt_trampolines 761 .globl end_virt_trampolines 762#endif 763 764#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 765/* 766 * Data area reserved for FWNMI option. 767 * This address (0x7000) is fixed by the RPA. 768 * pseries and powernv need to keep the whole page from 769 * 0x7000 to 0x8000 free for use by the firmware 770 */ 771ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000) 772OPEN_TEXT_SECTION(0x8000) 773#else 774OPEN_TEXT_SECTION(0x7000) 775#endif 776 777USE_FIXED_SECTION(real_vectors) 778 779/* 780 * This is the start of the interrupt handlers for pSeries 781 * This code runs with relocation off. 782 * Code from here to __end_interrupts gets copied down to real 783 * address 0x100 when we are running a relocatable kernel. 784 * Therefore any relative branches in this section must only 785 * branch to labels in this section. 786 */ 787 .globl __start_interrupts 788__start_interrupts: 789 790/** 791 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall). 792 * This is a synchronous interrupt invoked with the "scv" instruction. The 793 * system call does not alter the HV bit, so it is directed to the OS. 794 * 795 * Handling: 796 * scv instructions enter the kernel without changing EE, RI, ME, or HV. 797 * In particular, this means we can take a maskable interrupt at any point 798 * in the scv handler, which is unlike any other interrupt. This is solved 799 * by treating the instruction addresses in the handler as being soft-masked, 800 * by adding a SOFT_MASK_TABLE entry for them. 801 * 802 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and 803 * ensure scv is never executed with relocation off, which means AIL-0 804 * should never happen. 805 * 806 * Before leaving the following inside-__end_soft_masked text, at least of the 807 * following must be true: 808 * - MSR[PR]=1 (i.e., return to userspace) 809 * - MSR_EE|MSR_RI is clear (no reentrant exceptions) 810 * - Standard kernel environment is set up (stack, paca, etc) 811 * 812 * Call convention: 813 * 814 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst 815 */ 816EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000) 817 /* SCV 0 */ 818 mr r9,r13 819 GET_PACA(r13) 820 mflr r11 821 mfctr r12 822 li r10,IRQS_ALL_DISABLED 823 stb r10,PACAIRQSOFTMASK(r13) 824#ifdef CONFIG_RELOCATABLE 825 b system_call_vectored_tramp 826#else 827 b system_call_vectored_common 828#endif 829 nop 830 831 /* SCV 1 - 127 */ 832 .rept 127 833 mr r9,r13 834 GET_PACA(r13) 835 mflr r11 836 mfctr r12 837 li r10,IRQS_ALL_DISABLED 838 stb r10,PACAIRQSOFTMASK(r13) 839 li r0,-1 /* cause failure */ 840#ifdef CONFIG_RELOCATABLE 841 b system_call_vectored_sigill_tramp 842#else 843 b system_call_vectored_sigill 844#endif 845 .endr 846EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000) 847 848// Treat scv vectors as soft-masked, see comment above. 849// Use absolute values rather than labels here, so they don't get relocated, 850// because this code runs unrelocated. 851SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000) 852 853#ifdef CONFIG_RELOCATABLE 854TRAMP_VIRT_BEGIN(system_call_vectored_tramp) 855 __LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines) 856 mtctr r10 857 bctr 858 859TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp) 860 __LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines) 861 mtctr r10 862 bctr 863#endif 864 865 866/* No virt vectors corresponding with 0x0..0x100 */ 867EXC_VIRT_NONE(0x4000, 0x100) 868 869 870/** 871 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI). 872 * This is a non-maskable, asynchronous interrupt always taken in real-mode. 873 * It is caused by: 874 * - Wake from power-saving state, on powernv. 875 * - An NMI from another CPU, triggered by firmware or hypercall. 876 * - As crash/debug signal injected from BMC, firmware or hypervisor. 877 * 878 * Handling: 879 * Power-save wakeup is the only performance critical path, so this is 880 * determined quickly as possible first. In this case volatile registers 881 * can be discarded and SPRs like CFAR don't need to be read. 882 * 883 * If not a powersave wakeup, then it's run as a regular interrupt, however 884 * it uses its own stack and PACA save area to preserve the regular kernel 885 * environment for debugging. 886 * 887 * This interrupt is not maskable, so triggering it when MSR[RI] is clear, 888 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely 889 * correct to switch to virtual mode to run the regular interrupt handler 890 * because it might be interrupted when the MMU is in a bad state (e.g., SLB 891 * is clear). 892 * 893 * FWNMI: 894 * PAPR specifies a "fwnmi" facility which sends the sreset to a different 895 * entry point with a different register set up. Some hypervisors will 896 * send the sreset to 0x100 in the guest if it is not fwnmi capable. 897 * 898 * KVM: 899 * Unlike most SRR interrupts, this may be taken by the host while executing 900 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest 901 * mode and then raise the sreset. 902 */ 903INT_DEFINE_BEGIN(system_reset) 904 IVEC=0x100 905 IAREA=PACA_EXNMI 906 IVIRT=0 /* no virt entry point */ 907 ISTACK=0 908 IKVM_REAL=1 909INT_DEFINE_END(system_reset) 910 911EXC_REAL_BEGIN(system_reset, 0x100, 0x100) 912#ifdef CONFIG_PPC_P7_NAP 913 /* 914 * If running native on arch 2.06 or later, check if we are waking up 915 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1 916 * bits 46:47. A non-0 value indicates that we are coming from a power 917 * saving state. The idle wakeup handler initially runs in real mode, 918 * but we branch to the 0xc000... address so we can turn on relocation 919 * with mtmsrd later, after SPRs are restored. 920 * 921 * Careful to minimise cost for the fast path (idle wakeup) while 922 * also avoiding clobbering CFAR for the debug path (non-idle). 923 * 924 * For the idle wake case volatile registers can be clobbered, which 925 * is why we use those initially. If it turns out to not be an idle 926 * wake, carefully put everything back the way it was, so we can use 927 * common exception macros to handle it. 928 */ 929BEGIN_FTR_SECTION 930 SET_SCRATCH0(r13) 931 GET_PACA(r13) 932 std r3,PACA_EXNMI+0*8(r13) 933 std r4,PACA_EXNMI+1*8(r13) 934 std r5,PACA_EXNMI+2*8(r13) 935 mfspr r3,SPRN_SRR1 936 mfocrf r4,0x80 937 rlwinm. r5,r3,47-31,30,31 938 bne+ system_reset_idle_wake 939 /* Not powersave wakeup. Restore regs for regular interrupt handler. */ 940 mtocrf 0x80,r4 941 ld r3,PACA_EXNMI+0*8(r13) 942 ld r4,PACA_EXNMI+1*8(r13) 943 ld r5,PACA_EXNMI+2*8(r13) 944 GET_SCRATCH0(r13) 945END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 946#endif 947 948 GEN_INT_ENTRY system_reset, virt=0 949 /* 950 * In theory, we should not enable relocation here if it was disabled 951 * in SRR1, because the MMU may not be configured to support it (e.g., 952 * SLB may have been cleared). In practice, there should only be a few 953 * small windows where that's the case, and sreset is considered to 954 * be dangerous anyway. 955 */ 956EXC_REAL_END(system_reset, 0x100, 0x100) 957EXC_VIRT_NONE(0x4100, 0x100) 958 959#ifdef CONFIG_PPC_P7_NAP 960TRAMP_REAL_BEGIN(system_reset_idle_wake) 961 /* We are waking up from idle, so may clobber any volatile register */ 962 cmpwi cr1,r5,2 963 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 964 __LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines) 965 mtctr r12 966 bctr 967#endif 968 969#ifdef CONFIG_PPC_PSERIES 970/* 971 * Vectors for the FWNMI option. Share common code. 972 */ 973TRAMP_REAL_BEGIN(system_reset_fwnmi) 974 GEN_INT_ENTRY system_reset, virt=0 975 976#endif /* CONFIG_PPC_PSERIES */ 977 978EXC_COMMON_BEGIN(system_reset_common) 979 __GEN_COMMON_ENTRY system_reset 980 /* 981 * Increment paca->in_nmi. When the interrupt entry wrapper later 982 * enable MSR_RI, then SLB or MCE will be able to recover, but a nested 983 * NMI will notice in_nmi and not recover because of the use of the NMI 984 * stack. in_nmi reentrancy is tested in system_reset_exception. 985 */ 986 lhz r10,PACA_IN_NMI(r13) 987 addi r10,r10,1 988 sth r10,PACA_IN_NMI(r13) 989 990 mr r10,r1 991 ld r1,PACA_NMI_EMERG_SP(r13) 992 subi r1,r1,INT_FRAME_SIZE 993 __GEN_COMMON_BODY system_reset 994 995 addi r3,r1,STACK_FRAME_OVERHEAD 996 bl system_reset_exception 997 998 /* Clear MSR_RI before setting SRR0 and SRR1. */ 999 li r9,0 1000 mtmsrd r9,1 1001 1002 /* 1003 * MSR_RI is clear, now we can decrement paca->in_nmi. 1004 */ 1005 lhz r10,PACA_IN_NMI(r13) 1006 subi r10,r10,1 1007 sth r10,PACA_IN_NMI(r13) 1008 1009 kuap_kernel_restore r9, r10 1010 EXCEPTION_RESTORE_REGS 1011 RFI_TO_USER_OR_KERNEL 1012 1013 1014/** 1015 * Interrupt 0x200 - Machine Check Interrupt (MCE). 1016 * This is a non-maskable interrupt always taken in real-mode. It can be 1017 * synchronous or asynchronous, caused by hardware or software, and it may be 1018 * taken in a power-saving state. 1019 * 1020 * Handling: 1021 * Similarly to system reset, this uses its own stack and PACA save area, 1022 * the difference is re-entrancy is allowed on the machine check stack. 1023 * 1024 * machine_check_early is run in real mode, and carefully decodes the 1025 * machine check and tries to handle it (e.g., flush the SLB if there was an 1026 * error detected there), determines if it was recoverable and logs the 1027 * event. 1028 * 1029 * This early code does not "reconcile" irq soft-mask state like SRESET or 1030 * regular interrupts do, so irqs_disabled() among other things may not work 1031 * properly (irq disable/enable already doesn't work because irq tracing can 1032 * not work in real mode). 1033 * 1034 * Then, depending on the execution context when the interrupt is taken, there 1035 * are 3 main actions: 1036 * - Executing in kernel mode. The event is queued with irq_work, which means 1037 * it is handled when it is next safe to do so (i.e., the kernel has enabled 1038 * interrupts), which could be immediately when the interrupt returns. This 1039 * avoids nasty issues like switching to virtual mode when the MMU is in a 1040 * bad state, or when executing OPAL code. (SRESET is exposed to such issues, 1041 * but it has different priorities). Check to see if the CPU was in power 1042 * save, and return via the wake up code if it was. 1043 * 1044 * - Executing in user mode. machine_check_exception is run like a normal 1045 * interrupt handler, which processes the data generated by the early handler. 1046 * 1047 * - Executing in guest mode. The interrupt is run with its KVM test, and 1048 * branches to KVM to deal with. KVM may queue the event for the host 1049 * to report later. 1050 * 1051 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear, 1052 * or SCRATCH0 is in use, it may cause a crash. 1053 * 1054 * KVM: 1055 * See SRESET. 1056 */ 1057INT_DEFINE_BEGIN(machine_check_early) 1058 IVEC=0x200 1059 IAREA=PACA_EXMC 1060 IVIRT=0 /* no virt entry point */ 1061 IREALMODE_COMMON=1 1062 ISTACK=0 1063 IDAR=1 1064 IDSISR=1 1065 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 1066INT_DEFINE_END(machine_check_early) 1067 1068INT_DEFINE_BEGIN(machine_check) 1069 IVEC=0x200 1070 IAREA=PACA_EXMC 1071 IVIRT=0 /* no virt entry point */ 1072 IDAR=1 1073 IDSISR=1 1074 IKVM_REAL=1 1075INT_DEFINE_END(machine_check) 1076 1077EXC_REAL_BEGIN(machine_check, 0x200, 0x100) 1078 GEN_INT_ENTRY machine_check_early, virt=0 1079EXC_REAL_END(machine_check, 0x200, 0x100) 1080EXC_VIRT_NONE(0x4200, 0x100) 1081 1082#ifdef CONFIG_PPC_PSERIES 1083TRAMP_REAL_BEGIN(machine_check_fwnmi) 1084 /* See comment at machine_check exception, don't turn on RI */ 1085 GEN_INT_ENTRY machine_check_early, virt=0 1086#endif 1087 1088#define MACHINE_CHECK_HANDLER_WINDUP \ 1089 /* Clear MSR_RI before setting SRR0 and SRR1. */\ 1090 li r9,0; \ 1091 mtmsrd r9,1; /* Clear MSR_RI */ \ 1092 /* Decrement paca->in_mce now RI is clear. */ \ 1093 lhz r12,PACA_IN_MCE(r13); \ 1094 subi r12,r12,1; \ 1095 sth r12,PACA_IN_MCE(r13); \ 1096 EXCEPTION_RESTORE_REGS 1097 1098EXC_COMMON_BEGIN(machine_check_early_common) 1099 __GEN_REALMODE_COMMON_ENTRY machine_check_early 1100 1101 /* 1102 * Switch to mc_emergency stack and handle re-entrancy (we limit 1103 * the nested MCE upto level 4 to avoid stack overflow). 1104 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 1105 * 1106 * We use paca->in_mce to check whether this is the first entry or 1107 * nested machine check. We increment paca->in_mce to track nested 1108 * machine checks. 1109 * 1110 * If this is the first entry then set stack pointer to 1111 * paca->mc_emergency_sp, otherwise r1 is already pointing to 1112 * stack frame on mc_emergency stack. 1113 * 1114 * NOTE: We are here with MSR_ME=0 (off), which means we risk a 1115 * checkstop if we get another machine check exception before we do 1116 * rfid with MSR_ME=1. 1117 * 1118 * This interrupt can wake directly from idle. If that is the case, 1119 * the machine check is handled then the idle wakeup code is called 1120 * to restore state. 1121 */ 1122 lhz r10,PACA_IN_MCE(r13) 1123 cmpwi r10,0 /* Are we in nested machine check */ 1124 cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */ 1125 addi r10,r10,1 /* increment paca->in_mce */ 1126 sth r10,PACA_IN_MCE(r13) 1127 1128 mr r10,r1 /* Save r1 */ 1129 bne 1f 1130 /* First machine check entry */ 1131 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ 11321: /* Limit nested MCE to level 4 to avoid stack overflow */ 1133 bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */ 1134 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1135 1136 __GEN_COMMON_BODY machine_check_early 1137 1138BEGIN_FTR_SECTION 1139 bl enable_machine_check 1140END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1141 addi r3,r1,STACK_FRAME_OVERHEAD 1142 bl machine_check_early 1143 std r3,RESULT(r1) /* Save result */ 1144 ld r12,_MSR(r1) 1145 1146#ifdef CONFIG_PPC_P7_NAP 1147 /* 1148 * Check if thread was in power saving mode. We come here when any 1149 * of the following is true: 1150 * a. thread wasn't in power saving mode 1151 * b. thread was in power saving mode with no state loss, 1152 * supervisor state loss or hypervisor state loss. 1153 * 1154 * Go back to nap/sleep/winkle mode again if (b) is true. 1155 */ 1156BEGIN_FTR_SECTION 1157 rlwinm. r11,r12,47-31,30,31 1158 bne machine_check_idle_common 1159END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1160#endif 1161 1162#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1163 /* 1164 * Check if we are coming from guest. If yes, then run the normal 1165 * exception handler which will take the 1166 * machine_check_kvm->kvm_interrupt branch to deliver the MC event 1167 * to guest. 1168 */ 1169 lbz r11,HSTATE_IN_GUEST(r13) 1170 cmpwi r11,0 /* Check if coming from guest */ 1171 bne mce_deliver /* continue if we are. */ 1172#endif 1173 1174 /* 1175 * Check if we are coming from userspace. If yes, then run the normal 1176 * exception handler which will deliver the MC event to this kernel. 1177 */ 1178 andi. r11,r12,MSR_PR /* See if coming from user. */ 1179 bne mce_deliver /* continue in V mode if we are. */ 1180 1181 /* 1182 * At this point we are coming from kernel context. 1183 * Queue up the MCE event and return from the interrupt. 1184 * But before that, check if this is an un-recoverable exception. 1185 * If yes, then stay on emergency stack and panic. 1186 */ 1187 andi. r11,r12,MSR_RI 1188 beq unrecoverable_mce 1189 1190 /* 1191 * Check if we have successfully handled/recovered from error, if not 1192 * then stay on emergency stack and panic. 1193 */ 1194 ld r3,RESULT(r1) /* Load result */ 1195 cmpdi r3,0 /* see if we handled MCE successfully */ 1196 beq unrecoverable_mce /* if !handled then panic */ 1197 1198 /* 1199 * Return from MC interrupt. 1200 * Queue up the MCE event so that we can log it later, while 1201 * returning from kernel or opal call. 1202 */ 1203 bl machine_check_queue_event 1204 MACHINE_CHECK_HANDLER_WINDUP 1205 RFI_TO_KERNEL 1206 1207mce_deliver: 1208 /* 1209 * This is a host user or guest MCE. Restore all registers, then 1210 * run the "late" handler. For host user, this will run the 1211 * machine_check_exception handler in virtual mode like a normal 1212 * interrupt handler. For guest, this will trigger the KVM test 1213 * and branch to the KVM interrupt similarly to other interrupts. 1214 */ 1215BEGIN_FTR_SECTION 1216 ld r10,ORIG_GPR3(r1) 1217 mtspr SPRN_CFAR,r10 1218END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1219 MACHINE_CHECK_HANDLER_WINDUP 1220 GEN_INT_ENTRY machine_check, virt=0 1221 1222EXC_COMMON_BEGIN(machine_check_common) 1223 /* 1224 * Machine check is different because we use a different 1225 * save area: PACA_EXMC instead of PACA_EXGEN. 1226 */ 1227 GEN_COMMON machine_check 1228 addi r3,r1,STACK_FRAME_OVERHEAD 1229 bl machine_check_exception_async 1230 b interrupt_return_srr 1231 1232 1233#ifdef CONFIG_PPC_P7_NAP 1234/* 1235 * This is an idle wakeup. Low level machine check has already been 1236 * done. Queue the event then call the idle code to do the wake up. 1237 */ 1238EXC_COMMON_BEGIN(machine_check_idle_common) 1239 bl machine_check_queue_event 1240 1241 /* 1242 * GPR-loss wakeups are relatively straightforward, because the 1243 * idle sleep code has saved all non-volatile registers on its 1244 * own stack, and r1 in PACAR1. 1245 * 1246 * For no-loss wakeups the r1 and lr registers used by the 1247 * early machine check handler have to be restored first. r2 is 1248 * the kernel TOC, so no need to restore it. 1249 * 1250 * Then decrement MCE nesting after finishing with the stack. 1251 */ 1252 ld r3,_MSR(r1) 1253 ld r4,_LINK(r1) 1254 ld r1,GPR1(r1) 1255 1256 lhz r11,PACA_IN_MCE(r13) 1257 subi r11,r11,1 1258 sth r11,PACA_IN_MCE(r13) 1259 1260 mtlr r4 1261 rlwinm r10,r3,47-31,30,31 1262 cmpwi cr1,r10,2 1263 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 1264 b idle_return_gpr_loss 1265#endif 1266 1267EXC_COMMON_BEGIN(unrecoverable_mce) 1268 /* 1269 * We are going down. But there are chances that we might get hit by 1270 * another MCE during panic path and we may run into unstable state 1271 * with no way out. Hence, turn ME bit off while going down, so that 1272 * when another MCE is hit during panic path, system will checkstop 1273 * and hypervisor will get restarted cleanly by SP. 1274 */ 1275BEGIN_FTR_SECTION 1276 li r10,0 /* clear MSR_RI */ 1277 mtmsrd r10,1 1278 bl disable_machine_check 1279END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1280 ld r10,PACAKMSR(r13) 1281 li r3,MSR_ME 1282 andc r10,r10,r3 1283 mtmsrd r10 1284 1285 lhz r12,PACA_IN_MCE(r13) 1286 subi r12,r12,1 1287 sth r12,PACA_IN_MCE(r13) 1288 1289 /* 1290 * Invoke machine_check_exception to print MCE event and panic. 1291 * This is the NMI version of the handler because we are called from 1292 * the early handler which is a true NMI. 1293 */ 1294 addi r3,r1,STACK_FRAME_OVERHEAD 1295 bl machine_check_exception 1296 1297 /* 1298 * We will not reach here. Even if we did, there is no way out. 1299 * Call unrecoverable_exception and die. 1300 */ 1301 addi r3,r1,STACK_FRAME_OVERHEAD 1302 bl unrecoverable_exception 1303 b . 1304 1305 1306/** 1307 * Interrupt 0x300 - Data Storage Interrupt (DSI). 1308 * This is a synchronous interrupt generated due to a data access exception, 1309 * e.g., a load orstore which does not have a valid page table entry with 1310 * permissions. DAWR matches also fault here, as do RC updates, and minor misc 1311 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc. 1312 * 1313 * Handling: 1314 * - Hash MMU 1315 * Go to do_hash_fault, which attempts to fill the HPT from an entry in the 1316 * Linux page table. Hash faults can hit in kernel mode in a fairly 1317 * arbitrary state (e.g., interrupts disabled, locks held) when accessing 1318 * "non-bolted" regions, e.g., vmalloc space. However these should always be 1319 * backed by Linux page table entries. 1320 * 1321 * If no entry is found the Linux page fault handler is invoked (by 1322 * do_hash_fault). Linux page faults can happen in kernel mode due to user 1323 * copy operations of course. 1324 * 1325 * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest 1326 * MMU context, which may cause a DSI in the host, which must go to the 1327 * KVM handler. MSR[IR] is not enabled, so the real-mode handler will 1328 * always be used regardless of AIL setting. 1329 * 1330 * - Radix MMU 1331 * The hardware loads from the Linux page table directly, so a fault goes 1332 * immediately to Linux page fault. 1333 * 1334 * Conditions like DAWR match are handled on the way in to Linux page fault. 1335 */ 1336INT_DEFINE_BEGIN(data_access) 1337 IVEC=0x300 1338 IDAR=1 1339 IDSISR=1 1340 IKVM_REAL=1 1341INT_DEFINE_END(data_access) 1342 1343EXC_REAL_BEGIN(data_access, 0x300, 0x80) 1344 GEN_INT_ENTRY data_access, virt=0 1345EXC_REAL_END(data_access, 0x300, 0x80) 1346EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) 1347 GEN_INT_ENTRY data_access, virt=1 1348EXC_VIRT_END(data_access, 0x4300, 0x80) 1349EXC_COMMON_BEGIN(data_access_common) 1350 GEN_COMMON data_access 1351 ld r4,_DSISR(r1) 1352 addi r3,r1,STACK_FRAME_OVERHEAD 1353 andis. r0,r4,DSISR_DABRMATCH@h 1354 bne- 1f 1355#ifdef CONFIG_PPC_64S_HASH_MMU 1356BEGIN_MMU_FTR_SECTION 1357 bl do_hash_fault 1358MMU_FTR_SECTION_ELSE 1359 bl do_page_fault 1360ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1361#else 1362 bl do_page_fault 1363#endif 1364 b interrupt_return_srr 1365 13661: bl do_break 1367 /* 1368 * do_break() may have changed the NV GPRS while handling a breakpoint. 1369 * If so, we need to restore them with their updated values. 1370 */ 1371 REST_NVGPRS(r1) 1372 b interrupt_return_srr 1373 1374 1375/** 1376 * Interrupt 0x380 - Data Segment Interrupt (DSLB). 1377 * This is a synchronous interrupt in response to an MMU fault missing SLB 1378 * entry for HPT, or an address outside RPT translation range. 1379 * 1380 * Handling: 1381 * - HPT: 1382 * This refills the SLB, or reports an access fault similarly to a bad page 1383 * fault. When coming from user-mode, the SLB handler may access any kernel 1384 * data, though it may itself take a DSLB. When coming from kernel mode, 1385 * recursive faults must be avoided so access is restricted to the kernel 1386 * image text/data, kernel stack, and any data allocated below 1387 * ppc64_bolted_size (first segment). The kernel handler must avoid stomping 1388 * on user-handler data structures. 1389 * 1390 * KVM: Same as 0x300, DSLB must test for KVM guest. 1391 */ 1392INT_DEFINE_BEGIN(data_access_slb) 1393 IVEC=0x380 1394 IDAR=1 1395 IKVM_REAL=1 1396INT_DEFINE_END(data_access_slb) 1397 1398EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) 1399 GEN_INT_ENTRY data_access_slb, virt=0 1400EXC_REAL_END(data_access_slb, 0x380, 0x80) 1401EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) 1402 GEN_INT_ENTRY data_access_slb, virt=1 1403EXC_VIRT_END(data_access_slb, 0x4380, 0x80) 1404EXC_COMMON_BEGIN(data_access_slb_common) 1405 GEN_COMMON data_access_slb 1406#ifdef CONFIG_PPC_64S_HASH_MMU 1407BEGIN_MMU_FTR_SECTION 1408 /* HPT case, do SLB fault */ 1409 addi r3,r1,STACK_FRAME_OVERHEAD 1410 bl do_slb_fault 1411 cmpdi r3,0 1412 bne- 1f 1413 b fast_interrupt_return_srr 14141: /* Error case */ 1415MMU_FTR_SECTION_ELSE 1416 /* Radix case, access is outside page table range */ 1417 li r3,-EFAULT 1418ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1419#else 1420 li r3,-EFAULT 1421#endif 1422 std r3,RESULT(r1) 1423 addi r3,r1,STACK_FRAME_OVERHEAD 1424 bl do_bad_segment_interrupt 1425 b interrupt_return_srr 1426 1427 1428/** 1429 * Interrupt 0x400 - Instruction Storage Interrupt (ISI). 1430 * This is a synchronous interrupt in response to an MMU fault due to an 1431 * instruction fetch. 1432 * 1433 * Handling: 1434 * Similar to DSI, though in response to fetch. The faulting address is found 1435 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR). 1436 */ 1437INT_DEFINE_BEGIN(instruction_access) 1438 IVEC=0x400 1439 IISIDE=1 1440 IDAR=1 1441 IDSISR=1 1442#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1443 IKVM_REAL=1 1444#endif 1445INT_DEFINE_END(instruction_access) 1446 1447EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) 1448 GEN_INT_ENTRY instruction_access, virt=0 1449EXC_REAL_END(instruction_access, 0x400, 0x80) 1450EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) 1451 GEN_INT_ENTRY instruction_access, virt=1 1452EXC_VIRT_END(instruction_access, 0x4400, 0x80) 1453EXC_COMMON_BEGIN(instruction_access_common) 1454 GEN_COMMON instruction_access 1455 addi r3,r1,STACK_FRAME_OVERHEAD 1456#ifdef CONFIG_PPC_64S_HASH_MMU 1457BEGIN_MMU_FTR_SECTION 1458 bl do_hash_fault 1459MMU_FTR_SECTION_ELSE 1460 bl do_page_fault 1461ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1462#else 1463 bl do_page_fault 1464#endif 1465 b interrupt_return_srr 1466 1467 1468/** 1469 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB). 1470 * This is a synchronous interrupt in response to an MMU fault due to an 1471 * instruction fetch. 1472 * 1473 * Handling: 1474 * Similar to DSLB, though in response to fetch. The faulting address is found 1475 * in SRR0 (rather than DAR). 1476 */ 1477INT_DEFINE_BEGIN(instruction_access_slb) 1478 IVEC=0x480 1479 IISIDE=1 1480 IDAR=1 1481#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1482 IKVM_REAL=1 1483#endif 1484INT_DEFINE_END(instruction_access_slb) 1485 1486EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) 1487 GEN_INT_ENTRY instruction_access_slb, virt=0 1488EXC_REAL_END(instruction_access_slb, 0x480, 0x80) 1489EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) 1490 GEN_INT_ENTRY instruction_access_slb, virt=1 1491EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) 1492EXC_COMMON_BEGIN(instruction_access_slb_common) 1493 GEN_COMMON instruction_access_slb 1494#ifdef CONFIG_PPC_64S_HASH_MMU 1495BEGIN_MMU_FTR_SECTION 1496 /* HPT case, do SLB fault */ 1497 addi r3,r1,STACK_FRAME_OVERHEAD 1498 bl do_slb_fault 1499 cmpdi r3,0 1500 bne- 1f 1501 b fast_interrupt_return_srr 15021: /* Error case */ 1503MMU_FTR_SECTION_ELSE 1504 /* Radix case, access is outside page table range */ 1505 li r3,-EFAULT 1506ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1507#else 1508 li r3,-EFAULT 1509#endif 1510 std r3,RESULT(r1) 1511 addi r3,r1,STACK_FRAME_OVERHEAD 1512 bl do_bad_segment_interrupt 1513 b interrupt_return_srr 1514 1515 1516/** 1517 * Interrupt 0x500 - External Interrupt. 1518 * This is an asynchronous maskable interrupt in response to an "external 1519 * exception" from the interrupt controller or hypervisor (e.g., device 1520 * interrupt). It is maskable in hardware by clearing MSR[EE], and 1521 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()). 1522 * 1523 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that 1524 * interrupts are delivered with HSRR registers, guests use SRRs, which 1525 * reqiures IHSRR_IF_HVMODE. 1526 * 1527 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that 1528 * external interrupts are delivered as Hypervisor Virtualization Interrupts 1529 * rather than External Interrupts. 1530 * 1531 * Handling: 1532 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead, 1533 * because registers at the time of the interrupt are not so important as it is 1534 * asynchronous. 1535 * 1536 * If soft masked, the masked handler will note the pending interrupt for 1537 * replay, and clear MSR[EE] in the interrupted context. 1538 * 1539 * CFAR is not required because this is an asynchronous interrupt that in 1540 * general won't have much bearing on the state of the CPU, with the possible 1541 * exception of crash/debug IPIs, but those are generally moving to use SRESET 1542 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case 1543 * it may be exiting the guest and need CFAR to be saved. 1544 */ 1545INT_DEFINE_BEGIN(hardware_interrupt) 1546 IVEC=0x500 1547 IHSRR_IF_HVMODE=1 1548 IMASK=IRQS_DISABLED 1549 IKVM_REAL=1 1550 IKVM_VIRT=1 1551 ICFAR=0 1552#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1553 ICFAR_IF_HVMODE=1 1554#endif 1555INT_DEFINE_END(hardware_interrupt) 1556 1557EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) 1558 GEN_INT_ENTRY hardware_interrupt, virt=0 1559EXC_REAL_END(hardware_interrupt, 0x500, 0x100) 1560EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) 1561 GEN_INT_ENTRY hardware_interrupt, virt=1 1562EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) 1563EXC_COMMON_BEGIN(hardware_interrupt_common) 1564 GEN_COMMON hardware_interrupt 1565 addi r3,r1,STACK_FRAME_OVERHEAD 1566 bl do_IRQ 1567 BEGIN_FTR_SECTION 1568 b interrupt_return_hsrr 1569 FTR_SECTION_ELSE 1570 b interrupt_return_srr 1571 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1572 1573 1574/** 1575 * Interrupt 0x600 - Alignment Interrupt 1576 * This is a synchronous interrupt in response to data alignment fault. 1577 */ 1578INT_DEFINE_BEGIN(alignment) 1579 IVEC=0x600 1580 IDAR=1 1581 IDSISR=1 1582#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1583 IKVM_REAL=1 1584#endif 1585INT_DEFINE_END(alignment) 1586 1587EXC_REAL_BEGIN(alignment, 0x600, 0x100) 1588 GEN_INT_ENTRY alignment, virt=0 1589EXC_REAL_END(alignment, 0x600, 0x100) 1590EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) 1591 GEN_INT_ENTRY alignment, virt=1 1592EXC_VIRT_END(alignment, 0x4600, 0x100) 1593EXC_COMMON_BEGIN(alignment_common) 1594 GEN_COMMON alignment 1595 addi r3,r1,STACK_FRAME_OVERHEAD 1596 bl alignment_exception 1597 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1598 b interrupt_return_srr 1599 1600 1601/** 1602 * Interrupt 0x700 - Program Interrupt (program check). 1603 * This is a synchronous interrupt in response to various instruction faults: 1604 * traps, privilege errors, TM errors, floating point exceptions. 1605 * 1606 * Handling: 1607 * This interrupt may use the "emergency stack" in some cases when being taken 1608 * from kernel context, which complicates handling. 1609 */ 1610INT_DEFINE_BEGIN(program_check) 1611 IVEC=0x700 1612#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1613 IKVM_REAL=1 1614#endif 1615INT_DEFINE_END(program_check) 1616 1617EXC_REAL_BEGIN(program_check, 0x700, 0x100) 1618 1619#ifdef CONFIG_CPU_LITTLE_ENDIAN 1620 /* 1621 * There's a short window during boot where although the kernel is 1622 * running little endian, any exceptions will cause the CPU to switch 1623 * back to big endian. For example a WARN() boils down to a trap 1624 * instruction, which will cause a program check, and we end up here but 1625 * with the CPU in big endian mode. The first instruction of the program 1626 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when 1627 * executed in the wrong endian is an lhzu with a ~3GB displacement from 1628 * r3. The content of r3 is random, so that is a load from some random 1629 * location, and depending on the system can easily lead to a checkstop, 1630 * or an infinitely recursive page fault. 1631 * 1632 * So to handle that case we have a trampoline here that can detect we 1633 * are in the wrong endian and flip us back to the correct endian. We 1634 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires 1635 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as 1636 * SPRG1 is already used for the paca. SPRG3 is user readable, but this 1637 * trampoline is only active very early in boot, and SPRG3 will be 1638 * reinitialised in vdso_getcpu_init() before userspace starts. 1639 */ 1640BEGIN_FTR_SECTION 1641 tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8 1642 b 1f // Skip trampoline if endian is correct 1643 .long 0xa643707d // mtsprg 0, r11 Backup r11 1644 .long 0xa6027a7d // mfsrr0 r11 1645 .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 1646 .long 0xa6027b7d // mfsrr1 r11 1647 .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 1648 .long 0xa600607d // mfmsr r11 1649 .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] 1650 .long 0xa6037b7d // mtsrr1 r11 1651 .long 0x34076039 // li r11, 0x734 1652 .long 0xa6037a7d // mtsrr0 r11 1653 .long 0x2400004c // rfid 1654 mfsprg r11, 3 1655 mtsrr1 r11 // Restore SRR1 1656 mfsprg r11, 2 1657 mtsrr0 r11 // Restore SRR0 1658 mfsprg r11, 0 // Restore r11 16591: 1660END_FTR_SECTION(0, 1) // nop out after boot 1661#endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1662 1663 GEN_INT_ENTRY program_check, virt=0 1664EXC_REAL_END(program_check, 0x700, 0x100) 1665EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) 1666 GEN_INT_ENTRY program_check, virt=1 1667EXC_VIRT_END(program_check, 0x4700, 0x100) 1668EXC_COMMON_BEGIN(program_check_common) 1669 __GEN_COMMON_ENTRY program_check 1670 1671 /* 1672 * It's possible to receive a TM Bad Thing type program check with 1673 * userspace register values (in particular r1), but with SRR1 reporting 1674 * that we came from the kernel. Normally that would confuse the bad 1675 * stack logic, and we would report a bad kernel stack pointer. Instead 1676 * we switch to the emergency stack if we're taking a TM Bad Thing from 1677 * the kernel. 1678 */ 1679 1680 andi. r10,r12,MSR_PR 1681 bne .Lnormal_stack /* If userspace, go normal path */ 1682 1683 andis. r10,r12,(SRR1_PROGTM)@h 1684 bne .Lemergency_stack /* If TM, emergency */ 1685 1686 cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */ 1687 blt .Lnormal_stack /* normal path if not */ 1688 1689 /* Use the emergency stack */ 1690.Lemergency_stack: 1691 andi. r10,r12,MSR_PR /* Set CR0 correctly for label */ 1692 /* 3 in EXCEPTION_PROLOG_COMMON */ 1693 mr r10,r1 /* Save r1 */ 1694 ld r1,PACAEMERGSP(r13) /* Use emergency stack */ 1695 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1696 __ISTACK(program_check)=0 1697 __GEN_COMMON_BODY program_check 1698 b .Ldo_program_check 1699 1700.Lnormal_stack: 1701 __ISTACK(program_check)=1 1702 __GEN_COMMON_BODY program_check 1703 1704.Ldo_program_check: 1705 addi r3,r1,STACK_FRAME_OVERHEAD 1706 bl program_check_exception 1707 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1708 b interrupt_return_srr 1709 1710 1711/* 1712 * Interrupt 0x800 - Floating-Point Unavailable Interrupt. 1713 * This is a synchronous interrupt in response to executing an fp instruction 1714 * with MSR[FP]=0. 1715 * 1716 * Handling: 1717 * This will load FP registers and enable the FP bit if coming from userspace, 1718 * otherwise report a bad kernel use of FP. 1719 */ 1720INT_DEFINE_BEGIN(fp_unavailable) 1721 IVEC=0x800 1722#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1723 IKVM_REAL=1 1724#endif 1725INT_DEFINE_END(fp_unavailable) 1726 1727EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1728 GEN_INT_ENTRY fp_unavailable, virt=0 1729EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1730EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1731 GEN_INT_ENTRY fp_unavailable, virt=1 1732EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1733EXC_COMMON_BEGIN(fp_unavailable_common) 1734 GEN_COMMON fp_unavailable 1735 bne 1f /* if from user, just load it up */ 1736 addi r3,r1,STACK_FRAME_OVERHEAD 1737 bl kernel_fp_unavailable_exception 17380: trap 1739 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17401: 1741#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1742BEGIN_FTR_SECTION 1743 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1744 * transaction), go do TM stuff 1745 */ 1746 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1747 bne- 2f 1748END_FTR_SECTION_IFSET(CPU_FTR_TM) 1749#endif 1750 bl load_up_fpu 1751 b fast_interrupt_return_srr 1752#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17532: /* User process was in a transaction */ 1754 addi r3,r1,STACK_FRAME_OVERHEAD 1755 bl fp_unavailable_tm 1756 b interrupt_return_srr 1757#endif 1758 1759 1760/** 1761 * Interrupt 0x900 - Decrementer Interrupt. 1762 * This is an asynchronous interrupt in response to a decrementer exception 1763 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1764 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1765 * local_irq_disable()). 1766 * 1767 * Handling: 1768 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1769 * 1770 * If soft masked, the masked handler will note the pending interrupt for 1771 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1772 * in the interrupted context. 1773 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1774 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1775 * on the emergency stack. 1776 * 1777 * CFAR is not required because this is asynchronous (see hardware_interrupt). 1778 * A watchdog interrupt may like to have CFAR, but usually the interesting 1779 * branch is long gone by that point (e.g., infinite loop). 1780 */ 1781INT_DEFINE_BEGIN(decrementer) 1782 IVEC=0x900 1783 IMASK=IRQS_DISABLED 1784#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1785 IKVM_REAL=1 1786#endif 1787 ICFAR=0 1788INT_DEFINE_END(decrementer) 1789 1790EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1791 GEN_INT_ENTRY decrementer, virt=0 1792EXC_REAL_END(decrementer, 0x900, 0x80) 1793EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1794 GEN_INT_ENTRY decrementer, virt=1 1795EXC_VIRT_END(decrementer, 0x4900, 0x80) 1796EXC_COMMON_BEGIN(decrementer_common) 1797 GEN_COMMON decrementer 1798 addi r3,r1,STACK_FRAME_OVERHEAD 1799 bl timer_interrupt 1800 b interrupt_return_srr 1801 1802 1803/** 1804 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1805 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1806 * register. 1807 * 1808 * Handling: 1809 * Linux does not use this outside KVM where it's used to keep a host timer 1810 * while the guest is given control of DEC. It should normally be caught by 1811 * the KVM test and routed there. 1812 */ 1813INT_DEFINE_BEGIN(hdecrementer) 1814 IVEC=0x980 1815 IHSRR=1 1816 ISTACK=0 1817 IKVM_REAL=1 1818 IKVM_VIRT=1 1819INT_DEFINE_END(hdecrementer) 1820 1821EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1822 GEN_INT_ENTRY hdecrementer, virt=0 1823EXC_REAL_END(hdecrementer, 0x980, 0x80) 1824EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1825 GEN_INT_ENTRY hdecrementer, virt=1 1826EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1827EXC_COMMON_BEGIN(hdecrementer_common) 1828 __GEN_COMMON_ENTRY hdecrementer 1829 /* 1830 * Hypervisor decrementer interrupts not caught by the KVM test 1831 * shouldn't occur but are sometimes left pending on exit from a KVM 1832 * guest. We don't need to do anything to clear them, as they are 1833 * edge-triggered. 1834 * 1835 * Be careful to avoid touching the kernel stack. 1836 */ 1837 li r10,0 1838 stb r10,PACAHSRR_VALID(r13) 1839 ld r10,PACA_EXGEN+EX_CTR(r13) 1840 mtctr r10 1841 mtcrf 0x80,r9 1842 ld r9,PACA_EXGEN+EX_R9(r13) 1843 ld r10,PACA_EXGEN+EX_R10(r13) 1844 ld r11,PACA_EXGEN+EX_R11(r13) 1845 ld r12,PACA_EXGEN+EX_R12(r13) 1846 ld r13,PACA_EXGEN+EX_R13(r13) 1847 HRFI_TO_KERNEL 1848 1849 1850/** 1851 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1852 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1853 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1854 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1855 * 1856 * Handling: 1857 * Guests may use this for IPIs between threads in a core if the 1858 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1859 * 1860 * If soft masked, the masked handler will note the pending interrupt for 1861 * replay, leaving MSR[EE] enabled in the interrupted context because the 1862 * doorbells are edge triggered. 1863 * 1864 * CFAR is not required, similarly to hardware_interrupt. 1865 */ 1866INT_DEFINE_BEGIN(doorbell_super) 1867 IVEC=0xa00 1868 IMASK=IRQS_DISABLED 1869#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1870 IKVM_REAL=1 1871#endif 1872 ICFAR=0 1873INT_DEFINE_END(doorbell_super) 1874 1875EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1876 GEN_INT_ENTRY doorbell_super, virt=0 1877EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1878EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1879 GEN_INT_ENTRY doorbell_super, virt=1 1880EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1881EXC_COMMON_BEGIN(doorbell_super_common) 1882 GEN_COMMON doorbell_super 1883 addi r3,r1,STACK_FRAME_OVERHEAD 1884#ifdef CONFIG_PPC_DOORBELL 1885 bl doorbell_exception 1886#else 1887 bl unknown_async_exception 1888#endif 1889 b interrupt_return_srr 1890 1891 1892EXC_REAL_NONE(0xb00, 0x100) 1893EXC_VIRT_NONE(0x4b00, 0x100) 1894 1895/** 1896 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1897 * This is a synchronous interrupt invoked with the "sc" instruction. The 1898 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1899 * is directed to the currently running OS. The hypercall is invoked with 1900 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1901 * 1902 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1903 * 0x4c00 virtual mode. 1904 * 1905 * Handling: 1906 * If the KVM test fires then it was due to a hypercall and is accordingly 1907 * routed to KVM. Otherwise this executes a normal Linux system call. 1908 * 1909 * Call convention: 1910 * 1911 * syscall and hypercalls register conventions are documented in 1912 * Documentation/powerpc/syscall64-abi.rst and 1913 * Documentation/powerpc/papr_hcalls.rst respectively. 1914 * 1915 * The intersection of volatile registers that don't contain possible 1916 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry 1917 * without saving, though xer is not a good idea to use, as hardware may 1918 * interpret some bits so it may be costly to change them. 1919 */ 1920INT_DEFINE_BEGIN(system_call) 1921 IVEC=0xc00 1922 IKVM_REAL=1 1923 IKVM_VIRT=1 1924 ICFAR=0 1925INT_DEFINE_END(system_call) 1926 1927.macro SYSTEM_CALL virt 1928#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1929 /* 1930 * There is a little bit of juggling to get syscall and hcall 1931 * working well. Save r13 in ctr to avoid using SPRG scratch 1932 * register. 1933 * 1934 * Userspace syscalls have already saved the PPR, hcalls must save 1935 * it before setting HMT_MEDIUM. 1936 */ 1937 mtctr r13 1938 GET_PACA(r13) 1939 std r10,PACA_EXGEN+EX_R10(r13) 1940 INTERRUPT_TO_KERNEL 1941 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */ 1942 mfctr r9 1943#else 1944 mr r9,r13 1945 GET_PACA(r13) 1946 INTERRUPT_TO_KERNEL 1947#endif 1948 1949#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1950BEGIN_FTR_SECTION 1951 cmpdi r0,0x1ebe 1952 beq- 1f 1953END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 1954#endif 1955 1956 /* We reach here with PACA in r13, r13 in r9. */ 1957 mfspr r11,SPRN_SRR0 1958 mfspr r12,SPRN_SRR1 1959 1960 HMT_MEDIUM 1961 1962 .if ! \virt 1963 __LOAD_HANDLER(r10, system_call_common_real, real_vectors) 1964 mtctr r10 1965 bctr 1966 .else 1967#ifdef CONFIG_RELOCATABLE 1968 __LOAD_HANDLER(r10, system_call_common, virt_vectors) 1969 mtctr r10 1970 bctr 1971#else 1972 b system_call_common 1973#endif 1974 .endif 1975 1976#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1977 /* Fast LE/BE switch system call */ 19781: mfspr r12,SPRN_SRR1 1979 xori r12,r12,MSR_LE 1980 mtspr SPRN_SRR1,r12 1981 mr r13,r9 1982 RFI_TO_USER /* return to userspace */ 1983 b . /* prevent speculative execution */ 1984#endif 1985.endm 1986 1987EXC_REAL_BEGIN(system_call, 0xc00, 0x100) 1988 SYSTEM_CALL 0 1989EXC_REAL_END(system_call, 0xc00, 0x100) 1990EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100) 1991 SYSTEM_CALL 1 1992EXC_VIRT_END(system_call, 0x4c00, 0x100) 1993 1994#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1995TRAMP_REAL_BEGIN(kvm_hcall) 1996 std r9,PACA_EXGEN+EX_R9(r13) 1997 std r11,PACA_EXGEN+EX_R11(r13) 1998 std r12,PACA_EXGEN+EX_R12(r13) 1999 mfcr r9 2000 mfctr r10 2001 std r10,PACA_EXGEN+EX_R13(r13) 2002 li r10,0 2003 std r10,PACA_EXGEN+EX_CFAR(r13) 2004 std r10,PACA_EXGEN+EX_CTR(r13) 2005 /* 2006 * Save the PPR (on systems that support it) before changing to 2007 * HMT_MEDIUM. That allows the KVM code to save that value into the 2008 * guest state (it is the guest's PPR value). 2009 */ 2010BEGIN_FTR_SECTION 2011 mfspr r10,SPRN_PPR 2012 std r10,PACA_EXGEN+EX_PPR(r13) 2013END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2014 2015 HMT_MEDIUM 2016 2017#ifdef CONFIG_RELOCATABLE 2018 /* 2019 * Requires __LOAD_FAR_HANDLER beause kvmppc_hcall lives 2020 * outside the head section. 2021 */ 2022 __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines) 2023 mtctr r10 2024 bctr 2025#else 2026 b kvmppc_hcall 2027#endif 2028#endif 2029 2030/** 2031 * Interrupt 0xd00 - Trace Interrupt. 2032 * This is a synchronous interrupt in response to instruction step or 2033 * breakpoint faults. 2034 */ 2035INT_DEFINE_BEGIN(single_step) 2036 IVEC=0xd00 2037#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2038 IKVM_REAL=1 2039#endif 2040INT_DEFINE_END(single_step) 2041 2042EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 2043 GEN_INT_ENTRY single_step, virt=0 2044EXC_REAL_END(single_step, 0xd00, 0x100) 2045EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 2046 GEN_INT_ENTRY single_step, virt=1 2047EXC_VIRT_END(single_step, 0x4d00, 0x100) 2048EXC_COMMON_BEGIN(single_step_common) 2049 GEN_COMMON single_step 2050 addi r3,r1,STACK_FRAME_OVERHEAD 2051 bl single_step_exception 2052 b interrupt_return_srr 2053 2054 2055/** 2056 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 2057 * This is a synchronous interrupt in response to an MMU fault caused by a 2058 * guest data access. 2059 * 2060 * Handling: 2061 * This should always get routed to KVM. In radix MMU mode, this is caused 2062 * by a guest nested radix access that can't be performed due to the 2063 * partition scope page table. In hash mode, this can be caused by guests 2064 * running with translation disabled (virtual real mode) or with VPM enabled. 2065 * KVM will update the page table structures or disallow the access. 2066 */ 2067INT_DEFINE_BEGIN(h_data_storage) 2068 IVEC=0xe00 2069 IHSRR=1 2070 IDAR=1 2071 IDSISR=1 2072 IKVM_REAL=1 2073 IKVM_VIRT=1 2074INT_DEFINE_END(h_data_storage) 2075 2076EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2077 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2078EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2079EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2080 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2081EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2082EXC_COMMON_BEGIN(h_data_storage_common) 2083 GEN_COMMON h_data_storage 2084 addi r3,r1,STACK_FRAME_OVERHEAD 2085BEGIN_MMU_FTR_SECTION 2086 bl do_bad_page_fault_segv 2087MMU_FTR_SECTION_ELSE 2088 bl unknown_exception 2089ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2090 b interrupt_return_hsrr 2091 2092 2093/** 2094 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2095 * This is a synchronous interrupt in response to an MMU fault caused by a 2096 * guest instruction fetch, similar to HDSI. 2097 */ 2098INT_DEFINE_BEGIN(h_instr_storage) 2099 IVEC=0xe20 2100 IHSRR=1 2101 IKVM_REAL=1 2102 IKVM_VIRT=1 2103INT_DEFINE_END(h_instr_storage) 2104 2105EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2106 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2107EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2108EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2109 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2110EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2111EXC_COMMON_BEGIN(h_instr_storage_common) 2112 GEN_COMMON h_instr_storage 2113 addi r3,r1,STACK_FRAME_OVERHEAD 2114 bl unknown_exception 2115 b interrupt_return_hsrr 2116 2117 2118/** 2119 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 2120 */ 2121INT_DEFINE_BEGIN(emulation_assist) 2122 IVEC=0xe40 2123 IHSRR=1 2124 IKVM_REAL=1 2125 IKVM_VIRT=1 2126INT_DEFINE_END(emulation_assist) 2127 2128EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20) 2129 GEN_INT_ENTRY emulation_assist, virt=0, ool=1 2130EXC_REAL_END(emulation_assist, 0xe40, 0x20) 2131EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20) 2132 GEN_INT_ENTRY emulation_assist, virt=1, ool=1 2133EXC_VIRT_END(emulation_assist, 0x4e40, 0x20) 2134EXC_COMMON_BEGIN(emulation_assist_common) 2135 GEN_COMMON emulation_assist 2136 addi r3,r1,STACK_FRAME_OVERHEAD 2137 bl emulation_assist_interrupt 2138 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2139 b interrupt_return_hsrr 2140 2141 2142/** 2143 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI). 2144 * This is an asynchronous interrupt caused by a Hypervisor Maintenance 2145 * Exception. It is always taken in real mode but uses HSRR registers 2146 * unlike SRESET and MCE. 2147 * 2148 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable 2149 * with IRQS_DISABLED mask (i.e., local_irq_disable()). 2150 * 2151 * Handling: 2152 * This is a special case, this is handled similarly to machine checks, with an 2153 * initial real mode handler that is not soft-masked, which attempts to fix the 2154 * problem. Then a regular handler which is soft-maskable and reports the 2155 * problem. 2156 * 2157 * The emergency stack is used for the early real mode handler. 2158 * 2159 * XXX: unclear why MCE and HMI schemes could not be made common, e.g., 2160 * either use soft-masking for the MCE, or use irq_work for the HMI. 2161 * 2162 * KVM: 2163 * Unlike MCE, this calls into KVM without calling the real mode handler 2164 * first. 2165 */ 2166INT_DEFINE_BEGIN(hmi_exception_early) 2167 IVEC=0xe60 2168 IHSRR=1 2169 IREALMODE_COMMON=1 2170 ISTACK=0 2171 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 2172 IKVM_REAL=1 2173INT_DEFINE_END(hmi_exception_early) 2174 2175INT_DEFINE_BEGIN(hmi_exception) 2176 IVEC=0xe60 2177 IHSRR=1 2178 IMASK=IRQS_DISABLED 2179 IKVM_REAL=1 2180INT_DEFINE_END(hmi_exception) 2181 2182EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) 2183 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1 2184EXC_REAL_END(hmi_exception, 0xe60, 0x20) 2185EXC_VIRT_NONE(0x4e60, 0x20) 2186 2187EXC_COMMON_BEGIN(hmi_exception_early_common) 2188 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early 2189 2190 mr r10,r1 /* Save r1 */ 2191 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ 2192 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 2193 2194 __GEN_COMMON_BODY hmi_exception_early 2195 2196 addi r3,r1,STACK_FRAME_OVERHEAD 2197 bl hmi_exception_realmode 2198 cmpdi cr0,r3,0 2199 bne 1f 2200 2201 EXCEPTION_RESTORE_REGS hsrr=1 2202 HRFI_TO_USER_OR_KERNEL 2203 22041: 2205 /* 2206 * Go to virtual mode and pull the HMI event information from 2207 * firmware. 2208 */ 2209 EXCEPTION_RESTORE_REGS hsrr=1 2210 GEN_INT_ENTRY hmi_exception, virt=0 2211 2212EXC_COMMON_BEGIN(hmi_exception_common) 2213 GEN_COMMON hmi_exception 2214 addi r3,r1,STACK_FRAME_OVERHEAD 2215 bl handle_hmi_exception 2216 b interrupt_return_hsrr 2217 2218 2219/** 2220 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt. 2221 * This is an asynchronous interrupt in response to a msgsnd doorbell. 2222 * Similar to the 0xa00 doorbell but for host rather than guest. 2223 * 2224 * CFAR is not required (similar to doorbell_interrupt), unless KVM HV 2225 * is enabled, in which case it may be a guest exit. Most PowerNV kernels 2226 * include KVM support so it would be nice if this could be dynamically 2227 * patched out if KVM was not currently running any guests. 2228 */ 2229INT_DEFINE_BEGIN(h_doorbell) 2230 IVEC=0xe80 2231 IHSRR=1 2232 IMASK=IRQS_DISABLED 2233 IKVM_REAL=1 2234 IKVM_VIRT=1 2235#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2236 ICFAR=0 2237#endif 2238INT_DEFINE_END(h_doorbell) 2239 2240EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) 2241 GEN_INT_ENTRY h_doorbell, virt=0, ool=1 2242EXC_REAL_END(h_doorbell, 0xe80, 0x20) 2243EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) 2244 GEN_INT_ENTRY h_doorbell, virt=1, ool=1 2245EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) 2246EXC_COMMON_BEGIN(h_doorbell_common) 2247 GEN_COMMON h_doorbell 2248 addi r3,r1,STACK_FRAME_OVERHEAD 2249#ifdef CONFIG_PPC_DOORBELL 2250 bl doorbell_exception 2251#else 2252 bl unknown_async_exception 2253#endif 2254 b interrupt_return_hsrr 2255 2256 2257/** 2258 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt. 2259 * This is an asynchronous interrupt in response to an "external exception". 2260 * Similar to 0x500 but for host only. 2261 * 2262 * Like h_doorbell, CFAR is only required for KVM HV because this can be 2263 * a guest exit. 2264 */ 2265INT_DEFINE_BEGIN(h_virt_irq) 2266 IVEC=0xea0 2267 IHSRR=1 2268 IMASK=IRQS_DISABLED 2269 IKVM_REAL=1 2270 IKVM_VIRT=1 2271#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2272 ICFAR=0 2273#endif 2274INT_DEFINE_END(h_virt_irq) 2275 2276EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20) 2277 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1 2278EXC_REAL_END(h_virt_irq, 0xea0, 0x20) 2279EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) 2280 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1 2281EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) 2282EXC_COMMON_BEGIN(h_virt_irq_common) 2283 GEN_COMMON h_virt_irq 2284 addi r3,r1,STACK_FRAME_OVERHEAD 2285 bl do_IRQ 2286 b interrupt_return_hsrr 2287 2288 2289EXC_REAL_NONE(0xec0, 0x20) 2290EXC_VIRT_NONE(0x4ec0, 0x20) 2291EXC_REAL_NONE(0xee0, 0x20) 2292EXC_VIRT_NONE(0x4ee0, 0x20) 2293 2294 2295/* 2296 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU). 2297 * This is an asynchronous interrupt in response to a PMU exception. 2298 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 2299 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()). 2300 * 2301 * Handling: 2302 * This calls into the perf subsystem. 2303 * 2304 * Like the watchdog soft-nmi, it appears an NMI interrupt to Linux, in that it 2305 * runs under local_irq_disable. However it may be soft-masked in 2306 * powerpc-specific code. 2307 * 2308 * If soft masked, the masked handler will note the pending interrupt for 2309 * replay, and clear MSR[EE] in the interrupted context. 2310 * 2311 * CFAR is not used by perf interrupts so not required. 2312 */ 2313INT_DEFINE_BEGIN(performance_monitor) 2314 IVEC=0xf00 2315 IMASK=IRQS_PMI_DISABLED 2316#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2317 IKVM_REAL=1 2318#endif 2319 ICFAR=0 2320INT_DEFINE_END(performance_monitor) 2321 2322EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20) 2323 GEN_INT_ENTRY performance_monitor, virt=0, ool=1 2324EXC_REAL_END(performance_monitor, 0xf00, 0x20) 2325EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) 2326 GEN_INT_ENTRY performance_monitor, virt=1, ool=1 2327EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) 2328EXC_COMMON_BEGIN(performance_monitor_common) 2329 GEN_COMMON performance_monitor 2330 addi r3,r1,STACK_FRAME_OVERHEAD 2331 bl performance_monitor_exception 2332 b interrupt_return_srr 2333 2334 2335/** 2336 * Interrupt 0xf20 - Vector Unavailable Interrupt. 2337 * This is a synchronous interrupt in response to 2338 * executing a vector (or altivec) instruction with MSR[VEC]=0. 2339 * Similar to FP unavailable. 2340 */ 2341INT_DEFINE_BEGIN(altivec_unavailable) 2342 IVEC=0xf20 2343#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2344 IKVM_REAL=1 2345#endif 2346INT_DEFINE_END(altivec_unavailable) 2347 2348EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2349 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2350EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2351EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2352 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2353EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2354EXC_COMMON_BEGIN(altivec_unavailable_common) 2355 GEN_COMMON altivec_unavailable 2356#ifdef CONFIG_ALTIVEC 2357BEGIN_FTR_SECTION 2358 beq 1f 2359#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2360 BEGIN_FTR_SECTION_NESTED(69) 2361 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2362 * transaction), go do TM stuff 2363 */ 2364 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2365 bne- 2f 2366 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2367#endif 2368 bl load_up_altivec 2369 b fast_interrupt_return_srr 2370#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23712: /* User process was in a transaction */ 2372 addi r3,r1,STACK_FRAME_OVERHEAD 2373 bl altivec_unavailable_tm 2374 b interrupt_return_srr 2375#endif 23761: 2377END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2378#endif 2379 addi r3,r1,STACK_FRAME_OVERHEAD 2380 bl altivec_unavailable_exception 2381 b interrupt_return_srr 2382 2383 2384/** 2385 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2386 * This is a synchronous interrupt in response to 2387 * executing a VSX instruction with MSR[VSX]=0. 2388 * Similar to FP unavailable. 2389 */ 2390INT_DEFINE_BEGIN(vsx_unavailable) 2391 IVEC=0xf40 2392#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2393 IKVM_REAL=1 2394#endif 2395INT_DEFINE_END(vsx_unavailable) 2396 2397EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2398 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2399EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2400EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2401 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2402EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2403EXC_COMMON_BEGIN(vsx_unavailable_common) 2404 GEN_COMMON vsx_unavailable 2405#ifdef CONFIG_VSX 2406BEGIN_FTR_SECTION 2407 beq 1f 2408#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2409 BEGIN_FTR_SECTION_NESTED(69) 2410 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2411 * transaction), go do TM stuff 2412 */ 2413 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2414 bne- 2f 2415 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2416#endif 2417 b load_up_vsx 2418#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 24192: /* User process was in a transaction */ 2420 addi r3,r1,STACK_FRAME_OVERHEAD 2421 bl vsx_unavailable_tm 2422 b interrupt_return_srr 2423#endif 24241: 2425END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2426#endif 2427 addi r3,r1,STACK_FRAME_OVERHEAD 2428 bl vsx_unavailable_exception 2429 b interrupt_return_srr 2430 2431 2432/** 2433 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2434 * This is a synchronous interrupt in response to 2435 * executing an instruction without access to the facility that can be 2436 * resolved by the OS (e.g., FSCR, MSR). 2437 * Similar to FP unavailable. 2438 */ 2439INT_DEFINE_BEGIN(facility_unavailable) 2440 IVEC=0xf60 2441#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2442 IKVM_REAL=1 2443#endif 2444INT_DEFINE_END(facility_unavailable) 2445 2446EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20) 2447 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1 2448EXC_REAL_END(facility_unavailable, 0xf60, 0x20) 2449EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20) 2450 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1 2451EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20) 2452EXC_COMMON_BEGIN(facility_unavailable_common) 2453 GEN_COMMON facility_unavailable 2454 addi r3,r1,STACK_FRAME_OVERHEAD 2455 bl facility_unavailable_exception 2456 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2457 b interrupt_return_srr 2458 2459 2460/** 2461 * Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt. 2462 * This is a synchronous interrupt in response to 2463 * executing an instruction without access to the facility that can only 2464 * be resolved in HV mode (e.g., HFSCR). 2465 * Similar to FP unavailable. 2466 */ 2467INT_DEFINE_BEGIN(h_facility_unavailable) 2468 IVEC=0xf80 2469 IHSRR=1 2470 IKVM_REAL=1 2471 IKVM_VIRT=1 2472INT_DEFINE_END(h_facility_unavailable) 2473 2474EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20) 2475 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1 2476EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20) 2477EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20) 2478 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1 2479EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20) 2480EXC_COMMON_BEGIN(h_facility_unavailable_common) 2481 GEN_COMMON h_facility_unavailable 2482 addi r3,r1,STACK_FRAME_OVERHEAD 2483 bl facility_unavailable_exception 2484 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */ 2485 b interrupt_return_hsrr 2486 2487 2488EXC_REAL_NONE(0xfa0, 0x20) 2489EXC_VIRT_NONE(0x4fa0, 0x20) 2490EXC_REAL_NONE(0xfc0, 0x20) 2491EXC_VIRT_NONE(0x4fc0, 0x20) 2492EXC_REAL_NONE(0xfe0, 0x20) 2493EXC_VIRT_NONE(0x4fe0, 0x20) 2494 2495EXC_REAL_NONE(0x1000, 0x100) 2496EXC_VIRT_NONE(0x5000, 0x100) 2497EXC_REAL_NONE(0x1100, 0x100) 2498EXC_VIRT_NONE(0x5100, 0x100) 2499 2500#ifdef CONFIG_CBE_RAS 2501INT_DEFINE_BEGIN(cbe_system_error) 2502 IVEC=0x1200 2503 IHSRR=1 2504INT_DEFINE_END(cbe_system_error) 2505 2506EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) 2507 GEN_INT_ENTRY cbe_system_error, virt=0 2508EXC_REAL_END(cbe_system_error, 0x1200, 0x100) 2509EXC_VIRT_NONE(0x5200, 0x100) 2510EXC_COMMON_BEGIN(cbe_system_error_common) 2511 GEN_COMMON cbe_system_error 2512 addi r3,r1,STACK_FRAME_OVERHEAD 2513 bl cbe_system_error_exception 2514 b interrupt_return_hsrr 2515 2516#else /* CONFIG_CBE_RAS */ 2517EXC_REAL_NONE(0x1200, 0x100) 2518EXC_VIRT_NONE(0x5200, 0x100) 2519#endif 2520 2521/** 2522 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt. 2523 * This has been removed from the ISA before 2.01, which is the earliest 2524 * 64-bit BookS ISA supported, however the G5 / 970 implements this 2525 * interrupt with a non-architected feature available through the support 2526 * processor interface. 2527 */ 2528INT_DEFINE_BEGIN(instruction_breakpoint) 2529 IVEC=0x1300 2530#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2531 IKVM_REAL=1 2532#endif 2533INT_DEFINE_END(instruction_breakpoint) 2534 2535EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) 2536 GEN_INT_ENTRY instruction_breakpoint, virt=0 2537EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) 2538EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) 2539 GEN_INT_ENTRY instruction_breakpoint, virt=1 2540EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) 2541EXC_COMMON_BEGIN(instruction_breakpoint_common) 2542 GEN_COMMON instruction_breakpoint 2543 addi r3,r1,STACK_FRAME_OVERHEAD 2544 bl instruction_breakpoint_exception 2545 b interrupt_return_srr 2546 2547 2548EXC_REAL_NONE(0x1400, 0x100) 2549EXC_VIRT_NONE(0x5400, 0x100) 2550 2551/** 2552 * Interrupt 0x1500 - Soft Patch Interrupt 2553 * 2554 * Handling: 2555 * This is an implementation specific interrupt which can be used for a 2556 * range of exceptions. 2557 * 2558 * This interrupt handler is unique in that it runs the denormal assist 2559 * code even for guests (and even in guest context) without going to KVM, 2560 * for speed. POWER9 does not raise denorm exceptions, so this special case 2561 * could be phased out in future to reduce special cases. 2562 */ 2563INT_DEFINE_BEGIN(denorm_exception) 2564 IVEC=0x1500 2565 IHSRR=1 2566 IBRANCH_TO_COMMON=0 2567 IKVM_REAL=1 2568INT_DEFINE_END(denorm_exception) 2569 2570EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2571 GEN_INT_ENTRY denorm_exception, virt=0 2572#ifdef CONFIG_PPC_DENORMALISATION 2573 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2574 bne+ denorm_assist 2575#endif 2576 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2577EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2578#ifdef CONFIG_PPC_DENORMALISATION 2579EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2580 GEN_INT_ENTRY denorm_exception, virt=1 2581 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2582 bne+ denorm_assist 2583 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2584EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2585#else 2586EXC_VIRT_NONE(0x5500, 0x100) 2587#endif 2588 2589#ifdef CONFIG_PPC_DENORMALISATION 2590TRAMP_REAL_BEGIN(denorm_assist) 2591BEGIN_FTR_SECTION 2592/* 2593 * To denormalise we need to move a copy of the register to itself. 2594 * For POWER6 do that here for all FP regs. 2595 */ 2596 mfmsr r10 2597 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2598 xori r10,r10,(MSR_FE0|MSR_FE1) 2599 mtmsrd r10 2600 sync 2601 2602 .Lreg=0 2603 .rept 32 2604 fmr .Lreg,.Lreg 2605 .Lreg=.Lreg+1 2606 .endr 2607 2608FTR_SECTION_ELSE 2609/* 2610 * To denormalise we need to move a copy of the register to itself. 2611 * For POWER7 do that here for the first 32 VSX registers only. 2612 */ 2613 mfmsr r10 2614 oris r10,r10,MSR_VSX@h 2615 mtmsrd r10 2616 sync 2617 2618 .Lreg=0 2619 .rept 32 2620 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2621 .Lreg=.Lreg+1 2622 .endr 2623 2624ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2625 2626BEGIN_FTR_SECTION 2627 b denorm_done 2628END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2629/* 2630 * To denormalise we need to move a copy of the register to itself. 2631 * For POWER8 we need to do that for all 64 VSX registers 2632 */ 2633 .Lreg=32 2634 .rept 32 2635 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2636 .Lreg=.Lreg+1 2637 .endr 2638 2639denorm_done: 2640 mfspr r11,SPRN_HSRR0 2641 subi r11,r11,4 2642 mtspr SPRN_HSRR0,r11 2643 mtcrf 0x80,r9 2644 ld r9,PACA_EXGEN+EX_R9(r13) 2645BEGIN_FTR_SECTION 2646 ld r10,PACA_EXGEN+EX_PPR(r13) 2647 mtspr SPRN_PPR,r10 2648END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2649BEGIN_FTR_SECTION 2650 ld r10,PACA_EXGEN+EX_CFAR(r13) 2651 mtspr SPRN_CFAR,r10 2652END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2653 li r10,0 2654 stb r10,PACAHSRR_VALID(r13) 2655 ld r10,PACA_EXGEN+EX_R10(r13) 2656 ld r11,PACA_EXGEN+EX_R11(r13) 2657 ld r12,PACA_EXGEN+EX_R12(r13) 2658 ld r13,PACA_EXGEN+EX_R13(r13) 2659 HRFI_TO_UNKNOWN 2660 b . 2661#endif 2662 2663EXC_COMMON_BEGIN(denorm_exception_common) 2664 GEN_COMMON denorm_exception 2665 addi r3,r1,STACK_FRAME_OVERHEAD 2666 bl unknown_exception 2667 b interrupt_return_hsrr 2668 2669 2670#ifdef CONFIG_CBE_RAS 2671INT_DEFINE_BEGIN(cbe_maintenance) 2672 IVEC=0x1600 2673 IHSRR=1 2674INT_DEFINE_END(cbe_maintenance) 2675 2676EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2677 GEN_INT_ENTRY cbe_maintenance, virt=0 2678EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2679EXC_VIRT_NONE(0x5600, 0x100) 2680EXC_COMMON_BEGIN(cbe_maintenance_common) 2681 GEN_COMMON cbe_maintenance 2682 addi r3,r1,STACK_FRAME_OVERHEAD 2683 bl cbe_maintenance_exception 2684 b interrupt_return_hsrr 2685 2686#else /* CONFIG_CBE_RAS */ 2687EXC_REAL_NONE(0x1600, 0x100) 2688EXC_VIRT_NONE(0x5600, 0x100) 2689#endif 2690 2691 2692INT_DEFINE_BEGIN(altivec_assist) 2693 IVEC=0x1700 2694#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2695 IKVM_REAL=1 2696#endif 2697INT_DEFINE_END(altivec_assist) 2698 2699EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2700 GEN_INT_ENTRY altivec_assist, virt=0 2701EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2702EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2703 GEN_INT_ENTRY altivec_assist, virt=1 2704EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2705EXC_COMMON_BEGIN(altivec_assist_common) 2706 GEN_COMMON altivec_assist 2707 addi r3,r1,STACK_FRAME_OVERHEAD 2708#ifdef CONFIG_ALTIVEC 2709 bl altivec_assist_exception 2710 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2711#else 2712 bl unknown_exception 2713#endif 2714 b interrupt_return_srr 2715 2716 2717#ifdef CONFIG_CBE_RAS 2718INT_DEFINE_BEGIN(cbe_thermal) 2719 IVEC=0x1800 2720 IHSRR=1 2721INT_DEFINE_END(cbe_thermal) 2722 2723EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2724 GEN_INT_ENTRY cbe_thermal, virt=0 2725EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2726EXC_VIRT_NONE(0x5800, 0x100) 2727EXC_COMMON_BEGIN(cbe_thermal_common) 2728 GEN_COMMON cbe_thermal 2729 addi r3,r1,STACK_FRAME_OVERHEAD 2730 bl cbe_thermal_exception 2731 b interrupt_return_hsrr 2732 2733#else /* CONFIG_CBE_RAS */ 2734EXC_REAL_NONE(0x1800, 0x100) 2735EXC_VIRT_NONE(0x5800, 0x100) 2736#endif 2737 2738 2739#ifdef CONFIG_PPC_WATCHDOG 2740 2741INT_DEFINE_BEGIN(soft_nmi) 2742 IVEC=0x900 2743 ISTACK=0 2744 ICFAR=0 2745INT_DEFINE_END(soft_nmi) 2746 2747/* 2748 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2749 * stack is one that is usable by maskable interrupts so long as MSR_EE 2750 * remains off. It is used for recovery when something has corrupted the 2751 * normal kernel stack, for example. The "soft NMI" must not use the process 2752 * stack because we want irq disabled sections to avoid touching the stack 2753 * at all (other than PMU interrupts), so use the emergency stack for this, 2754 * and run it entirely with interrupts hard disabled. 2755 */ 2756EXC_COMMON_BEGIN(soft_nmi_common) 2757 mr r10,r1 2758 ld r1,PACAEMERGSP(r13) 2759 subi r1,r1,INT_FRAME_SIZE 2760 __GEN_COMMON_BODY soft_nmi 2761 2762 addi r3,r1,STACK_FRAME_OVERHEAD 2763 bl soft_nmi_interrupt 2764 2765 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2766 li r9,0 2767 mtmsrd r9,1 2768 2769 kuap_kernel_restore r9, r10 2770 2771 EXCEPTION_RESTORE_REGS hsrr=0 2772 RFI_TO_KERNEL 2773 2774#endif /* CONFIG_PPC_WATCHDOG */ 2775 2776/* 2777 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2778 * - If it was a decrementer interrupt, we bump the dec to max and and return. 2779 * - If it was a doorbell we return immediately since doorbells are edge 2780 * triggered and won't automatically refire. 2781 * - If it was a HMI we return immediately since we handled it in realmode 2782 * and it won't refire. 2783 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2784 * This is called with r10 containing the value to OR to the paca field. 2785 */ 2786.macro MASKED_INTERRUPT hsrr=0 2787 .if \hsrr 2788masked_Hinterrupt: 2789 .else 2790masked_interrupt: 2791 .endif 2792 stw r9,PACA_EXGEN+EX_CCR(r13) 2793 lbz r9,PACAIRQHAPPENED(r13) 2794 or r9,r9,r10 2795 stb r9,PACAIRQHAPPENED(r13) 2796 2797 .if ! \hsrr 2798 cmpwi r10,PACA_IRQ_DEC 2799 bne 1f 2800 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2801 mtspr SPRN_DEC,r9 2802#ifdef CONFIG_PPC_WATCHDOG 2803 lwz r9,PACA_EXGEN+EX_CCR(r13) 2804 b soft_nmi_common 2805#else 2806 b 2f 2807#endif 2808 .endif 2809 28101: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2811 beq 2f 2812 xori r12,r12,MSR_EE /* clear MSR_EE */ 2813 .if \hsrr 2814 mtspr SPRN_HSRR1,r12 2815 .else 2816 mtspr SPRN_SRR1,r12 2817 .endif 2818 ori r9,r9,PACA_IRQ_HARD_DIS 2819 stb r9,PACAIRQHAPPENED(r13) 28202: /* done */ 2821 li r9,0 2822 .if \hsrr 2823 stb r9,PACAHSRR_VALID(r13) 2824 .else 2825 stb r9,PACASRR_VALID(r13) 2826 .endif 2827 2828 SEARCH_RESTART_TABLE 2829 cmpdi r12,0 2830 beq 3f 2831 .if \hsrr 2832 mtspr SPRN_HSRR0,r12 2833 .else 2834 mtspr SPRN_SRR0,r12 2835 .endif 28363: 2837 2838 ld r9,PACA_EXGEN+EX_CTR(r13) 2839 mtctr r9 2840 lwz r9,PACA_EXGEN+EX_CCR(r13) 2841 mtcrf 0x80,r9 2842 std r1,PACAR1(r13) 2843 ld r9,PACA_EXGEN+EX_R9(r13) 2844 ld r10,PACA_EXGEN+EX_R10(r13) 2845 ld r11,PACA_EXGEN+EX_R11(r13) 2846 ld r12,PACA_EXGEN+EX_R12(r13) 2847 ld r13,PACA_EXGEN+EX_R13(r13) 2848 /* May return to masked low address where r13 is not set up */ 2849 .if \hsrr 2850 HRFI_TO_KERNEL 2851 .else 2852 RFI_TO_KERNEL 2853 .endif 2854 b . 2855.endm 2856 2857TRAMP_REAL_BEGIN(stf_barrier_fallback) 2858 std r9,PACA_EXRFI+EX_R9(r13) 2859 std r10,PACA_EXRFI+EX_R10(r13) 2860 sync 2861 ld r9,PACA_EXRFI+EX_R9(r13) 2862 ld r10,PACA_EXRFI+EX_R10(r13) 2863 ori 31,31,0 2864 .rept 14 2865 b 1f 28661: 2867 .endr 2868 blr 2869 2870/* Clobbers r10, r11, ctr */ 2871.macro L1D_DISPLACEMENT_FLUSH 2872 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2873 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2874 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2875 mtctr r11 2876 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2877 2878 /* order ld/st prior to dcbt stop all streams with flushing */ 2879 sync 2880 2881 /* 2882 * The load addresses are at staggered offsets within cachelines, 2883 * which suits some pipelines better (on others it should not 2884 * hurt). 2885 */ 28861: 2887 ld r11,(0x80 + 8)*0(r10) 2888 ld r11,(0x80 + 8)*1(r10) 2889 ld r11,(0x80 + 8)*2(r10) 2890 ld r11,(0x80 + 8)*3(r10) 2891 ld r11,(0x80 + 8)*4(r10) 2892 ld r11,(0x80 + 8)*5(r10) 2893 ld r11,(0x80 + 8)*6(r10) 2894 ld r11,(0x80 + 8)*7(r10) 2895 addi r10,r10,0x80*8 2896 bdnz 1b 2897.endm 2898 2899TRAMP_REAL_BEGIN(entry_flush_fallback) 2900 std r9,PACA_EXRFI+EX_R9(r13) 2901 std r10,PACA_EXRFI+EX_R10(r13) 2902 std r11,PACA_EXRFI+EX_R11(r13) 2903 mfctr r9 2904 L1D_DISPLACEMENT_FLUSH 2905 mtctr r9 2906 ld r9,PACA_EXRFI+EX_R9(r13) 2907 ld r10,PACA_EXRFI+EX_R10(r13) 2908 ld r11,PACA_EXRFI+EX_R11(r13) 2909 blr 2910 2911/* 2912 * The SCV entry flush happens with interrupts enabled, so it must disable 2913 * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10 2914 * (containing LR) does not need to be preserved here because scv entry 2915 * puts 0 in the pt_regs, CTR can be clobbered for the same reason. 2916 */ 2917TRAMP_REAL_BEGIN(scv_entry_flush_fallback) 2918 li r10,0 2919 mtmsrd r10,1 2920 lbz r10,PACAIRQHAPPENED(r13) 2921 ori r10,r10,PACA_IRQ_HARD_DIS 2922 stb r10,PACAIRQHAPPENED(r13) 2923 std r11,PACA_EXRFI+EX_R11(r13) 2924 L1D_DISPLACEMENT_FLUSH 2925 ld r11,PACA_EXRFI+EX_R11(r13) 2926 li r10,MSR_RI 2927 mtmsrd r10,1 2928 blr 2929 2930TRAMP_REAL_BEGIN(rfi_flush_fallback) 2931 SET_SCRATCH0(r13); 2932 GET_PACA(r13); 2933 std r1,PACA_EXRFI+EX_R12(r13) 2934 ld r1,PACAKSAVE(r13) 2935 std r9,PACA_EXRFI+EX_R9(r13) 2936 std r10,PACA_EXRFI+EX_R10(r13) 2937 std r11,PACA_EXRFI+EX_R11(r13) 2938 mfctr r9 2939 L1D_DISPLACEMENT_FLUSH 2940 mtctr r9 2941 ld r9,PACA_EXRFI+EX_R9(r13) 2942 ld r10,PACA_EXRFI+EX_R10(r13) 2943 ld r11,PACA_EXRFI+EX_R11(r13) 2944 ld r1,PACA_EXRFI+EX_R12(r13) 2945 GET_SCRATCH0(r13); 2946 rfid 2947 2948TRAMP_REAL_BEGIN(hrfi_flush_fallback) 2949 SET_SCRATCH0(r13); 2950 GET_PACA(r13); 2951 std r1,PACA_EXRFI+EX_R12(r13) 2952 ld r1,PACAKSAVE(r13) 2953 std r9,PACA_EXRFI+EX_R9(r13) 2954 std r10,PACA_EXRFI+EX_R10(r13) 2955 std r11,PACA_EXRFI+EX_R11(r13) 2956 mfctr r9 2957 L1D_DISPLACEMENT_FLUSH 2958 mtctr r9 2959 ld r9,PACA_EXRFI+EX_R9(r13) 2960 ld r10,PACA_EXRFI+EX_R10(r13) 2961 ld r11,PACA_EXRFI+EX_R11(r13) 2962 ld r1,PACA_EXRFI+EX_R12(r13) 2963 GET_SCRATCH0(r13); 2964 hrfid 2965 2966TRAMP_REAL_BEGIN(rfscv_flush_fallback) 2967 /* system call volatile */ 2968 mr r7,r13 2969 GET_PACA(r13); 2970 mr r8,r1 2971 ld r1,PACAKSAVE(r13) 2972 mfctr r9 2973 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2974 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2975 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2976 mtctr r11 2977 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2978 2979 /* order ld/st prior to dcbt stop all streams with flushing */ 2980 sync 2981 2982 /* 2983 * The load adresses are at staggered offsets within cachelines, 2984 * which suits some pipelines better (on others it should not 2985 * hurt). 2986 */ 29871: 2988 ld r11,(0x80 + 8)*0(r10) 2989 ld r11,(0x80 + 8)*1(r10) 2990 ld r11,(0x80 + 8)*2(r10) 2991 ld r11,(0x80 + 8)*3(r10) 2992 ld r11,(0x80 + 8)*4(r10) 2993 ld r11,(0x80 + 8)*5(r10) 2994 ld r11,(0x80 + 8)*6(r10) 2995 ld r11,(0x80 + 8)*7(r10) 2996 addi r10,r10,0x80*8 2997 bdnz 1b 2998 2999 mtctr r9 3000 li r9,0 3001 li r10,0 3002 li r11,0 3003 mr r1,r8 3004 mr r13,r7 3005 RFSCV 3006 3007USE_TEXT_SECTION() 3008 3009#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 3010kvm_interrupt: 3011 /* 3012 * The conditional branch in KVMTEST can't reach all the way, 3013 * make a stub. 3014 */ 3015 b kvmppc_interrupt 3016#endif 3017 3018_GLOBAL(do_uaccess_flush) 3019 UACCESS_FLUSH_FIXUP_SECTION 3020 nop 3021 nop 3022 nop 3023 blr 3024 L1D_DISPLACEMENT_FLUSH 3025 blr 3026_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) 3027EXPORT_SYMBOL(do_uaccess_flush) 3028 3029 3030MASKED_INTERRUPT 3031MASKED_INTERRUPT hsrr=1 3032 3033 /* 3034 * Relocation-on interrupts: A subset of the interrupts can be delivered 3035 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering 3036 * it. Addresses are the same as the original interrupt addresses, but 3037 * offset by 0xc000000000004000. 3038 * It's impossible to receive interrupts below 0x300 via this mechanism. 3039 * KVM: None of these traps are from the guest ; anything that escalated 3040 * to HV=1 from HV=0 is delivered via real mode handlers. 3041 */ 3042 3043 /* 3044 * This uses the standard macro, since the original 0x300 vector 3045 * only has extra guff for STAB-based processors -- which never 3046 * come here. 3047 */ 3048 3049USE_FIXED_SECTION(virt_trampolines) 3050 /* 3051 * All code below __end_soft_masked is treated as soft-masked. If 3052 * any code runs here with MSR[EE]=1, it must then cope with pending 3053 * soft interrupt being raised (i.e., by ensuring it is replayed). 3054 * 3055 * The __end_interrupts marker must be past the out-of-line (OOL) 3056 * handlers, so that they are copied to real address 0x100 when running 3057 * a relocatable kernel. This ensures they can be reached from the short 3058 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch 3059 * directly, without using LOAD_HANDLER(). 3060 */ 3061 .align 7 3062 .globl __end_interrupts 3063__end_interrupts: 3064DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines) 3065 3066CLOSE_FIXED_SECTION(real_vectors); 3067CLOSE_FIXED_SECTION(real_trampolines); 3068CLOSE_FIXED_SECTION(virt_vectors); 3069CLOSE_FIXED_SECTION(virt_trampolines); 3070 3071USE_TEXT_SECTION() 3072 3073/* MSR[RI] should be clear because this uses SRR[01] */ 3074enable_machine_check: 3075 mflr r0 3076 bcl 20,31,$+4 30770: mflr r3 3078 addi r3,r3,(1f - 0b) 3079 mtspr SPRN_SRR0,r3 3080 mfmsr r3 3081 ori r3,r3,MSR_ME 3082 mtspr SPRN_SRR1,r3 3083 RFI_TO_KERNEL 30841: mtlr r0 3085 blr 3086 3087/* MSR[RI] should be clear because this uses SRR[01] */ 3088disable_machine_check: 3089 mflr r0 3090 bcl 20,31,$+4 30910: mflr r3 3092 addi r3,r3,(1f - 0b) 3093 mtspr SPRN_SRR0,r3 3094 mfmsr r3 3095 li r4,MSR_ME 3096 andc r3,r3,r4 3097 mtspr SPRN_SRR1,r3 3098 RFI_TO_KERNEL 30991: mtlr r0 3100 blr 3101