/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR0	(SFS-56)
#define STACK_SLOT_DAWRX0	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
#define STACK_SLOT_FSCR		(SFS-96)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1		/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	extsw	r0, r0
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from Linux offline idle code.
 * Relocation is off
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	mfcr	r5
	mflr	r0
	std	r5, 8(r1)	// Save CR in caller's frame
	std	r0, 16(r1)	// Save LR in caller's frame
	// Create frame on emergency stack
	ld	r4, PACAEMERGSP(r13)
	stdu	r1, -SWITCH_FRAME_SIZE(r4)
	// Switch to new frame on emergency stack
	mr	r1, r4
	std	r3, 32(r1)	// Save SRR1 wakeup value
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	// About to go to guest, clear saved SRR1
	li	r0, 0
	std	r0, 32(r1)

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6,0x7fff	/* MAX_INT@h */
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	// Return SRR1 wakeup value, or 0 if we went into the guest
	ld	r3, 32(r1)
	REST_NVGPRS(r1)
	ld	r1, 0(r1)	// Switch back to caller stack
	ld	r0, 16(r1)	// Reload LR
	ld	r5, 8(r1)	// Reload CR
	mtlr	r0
	mtcr	r5
	blr

53:
	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100	/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r7, r6
	beq	38f
	or	r7, r7, r6
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR0
	mfspr	r7, SPRN_DAWRX0
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR0(r1)
	std	r7, STACK_SLOT_DAWRX0(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_FSCR
	std	r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR0(r4)
	ld	r6, VCPU_DAWRX0(r4)
	mtspr	SPRN_DAWR0, r5
	mtspr	SPRN_DAWRX0, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6

	/* Restore state of CTRL run bit; the host currently has it set to 1 */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	li	r6,0
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	extsw	r3, r3
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* Clear out and reload the SLB */
	li	r6, 0
	slbmte	r6, r6
	PPC_SLBIA(6)
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	mtlr	r5

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)

	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R9		= HSTATE_IN_GUEST
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest R13 saved in SPRN_SCRATCH0
	 * guest R9 saved in HSTATE_SCRATCH2
	 */
	/* We're now back in the host but in guest MMU context */
	cmpwi	r9,KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2	/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar	r3
	mfdsisr	r4
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	extsw	r3, r3
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	maybe_reenter_guest
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	beq	hmi_realmode

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	/*
	 * Possibly flush the link stack here, before we do a blr in
	 * kvmhv_switch_to_host.
	 */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack

	/* For hash guest, read the guest SLB and save it away */
	li	r5, 0
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	PPC_SLBIA(6)
	ptesync
	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
16:	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	mr	r3, r12
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 if it was clear */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	li	r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_FSCR(r1)
	mtspr	SPRN_FSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_IAMR, r8

8:	/* Power7 jumps back in here */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_AMR, r5
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r9
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	mr	r3, r9
	li	r4, 1
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR0(r1)
	ld	r7, STACK_SLOT_DAWRX0(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWR0, r6
	mtspr	SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi	r8,0
	beq	17f
	li	r0, 0
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:
	/*
	 * If this is an HMI, we called kvmppc_realmode_hmi_handler
	 * above, which may or may not have already called
	 * kvmppc_subcore_exit_guest. Fortunately, all that
	 * kvmppc_subcore_exit_guest does is clear a flag, so calling
	 * it again here is benign even if kvmppc_realmode_hmi_handler
	 * has already called it.
	 */
	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r0, r6
	beq	18f
	mtspr	SPRN_PCR, r6
18:
	/* Signal secondary CPUs to continue */
	li	r0, 0
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

.balign 32
.global kvm_flush_link_stack
kvm_flush_link_stack:
	/* Save LR into r0 */
	mflr	r0

	/* Flush the link stack. On Power8 it's up to 32 entries in size. */
	.rept	32
	bl	.+4
	.endr

	/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
	.rept	32
	bl	.+4
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* Restore LR */
	mtlr	r0
	blr

kvmppc_guest_external:
	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we check the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
maybe_reenter_guest:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt
	b	guest_exit_cont

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
#else
	.long	0		/* 0x1c */
	.long	0		/* 0x20 */
#endif
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#else
	.long	0		/* 0x138 */
	.long	0		/* 0x13c */
#endif
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_rm_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL_TOC(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL_TOC(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
	LOAD_REG_ADDR(r11, dawr_force_enable)
	lbz	r11, 0(r11)
	cmpdi	r11, 0
	bne	3f
	li	r3, H_HARDWARE
	blr
3:
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR0(r3)
	std	r5, VCPU_DAWRX0(r3)
	/*
	 * If we came in through the real-mode hcall handler then it is
	 * necessary to write the registers here, since the return path
	 * won't.  Otherwise it is sufficient to store them in the vcpu
	 * struct, as they will be loaded the next time the vcpu is run.
	 */
	mfmsr	r6
	andi.	r6, r6, MSR_DR		/* in real mode? */
	bne	4f
	mtspr	SPRN_DAWR0, r4
	mtspr	SPRN_DAWRX0, r5
4:	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	extsw	r3, r3
	extsw	r4, r4
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif
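/*
 * The DEC/HDEC handling just above clamps the decrementer so the nap
 * ends no later than our timeslice, and records when the guest
 * decrementer would have expired, in timebase ticks.  As a C sketch
 * (mfspr/mtspr/mftb stand in for the corresponding instructions and
 * are not real C helpers):
 *
 *	static void clamp_dec_and_record_expiry(uint64_t *dec_expires)
 *	{
 *		int64_t dec  = (int32_t)mfspr(SPRN_DEC);   // sign-extended
 *		int64_t hdec = (int32_t)mfspr(SPRN_HDEC);
 *		uint64_t tb  = mftb();
 *
 *		if (dec > hdec)
 *			mtspr(SPRN_DEC, hdec); // wake by end of timeslice
 *		*dec_expires = dec + tb;       // guest DEC expiry, in tb ticks
 *	}
 */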

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	li	r0,0
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
	li	r3, PNV_THREAD_NAP
	mtspr	SPRN_LPCR,r5
	isync

	bl	isa206_idle_insn_mayloss

	li	r0,1
	mtspr	SPRN_CTRLT, r0

	mtspr	SPRN_SRR1, r3

	li	r0, 0
	stb	r0, PACA_FTRACE_ENABLED(r13)

	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	lbz	r0, HSTATE_NAPPING(r13)
	cmpwi	r0, NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0, NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
	cmpwi	r0, NAPPING_UNSPLIT
	beq	kvm_unsplit_wakeup
	twi	31,0,0		/* Nap state must not be zero */

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont
	b	maybe_reenter_guest
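/*
 * Both the cede entry path and the wakeup path update
 * vcore->napping_threads with lwarx/stwcx. loops, ordered against the
 * entry_exit_map checks with isync/sync.  The same bit manipulation,
 * expressed as a C11 compare-exchange sketch (illustrative only):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	// Set our bit; bail out (like "beq kvm_cede_exit" above) when
 *	// every thread that entered the vcore would then be napping.
 *	static bool napping_set_bit(_Atomic uint32_t *napping_threads,
 *				    uint32_t entered_map, unsigned int ptid)
 *	{
 *		uint32_t old = atomic_load(napping_threads);
 *		uint32_t new;
 *
 *		do {
 *			new = old | (1u << ptid);
 *			if (new == entered_map)
 *				return false;  // send the cede up to the host
 *		} while (!atomic_compare_exchange_weak(napping_threads,
 *						       &old, new));
 *		return true;
 *	}
 *
 *	static void napping_clear_bit(_Atomic uint32_t *napping_threads,
 *				      unsigned int ptid)
 *	{
 *		atomic_fetch_and(napping_threads, ~(1u << ptid));
 *	}
 */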

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	/* all machine checks go to virtual mode for further handling */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	b	guest_exit_cont

/*
 * Call C code to handle a HMI in real mode.
 * Only the primary thread does the call, secondary threads are handled
 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
 * r9 points to the vcpu on entry
 */
hmi_realmode:
	lbz	r0, HSTATE_PTID(r13)
	cmpwi	r0, 0
	bne	guest_exit_cont
	bl	kvmppc_realmode_hmi_handler
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HMI
	b	guest_exit_cont

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr
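/*
 * Stripped of the SPR access and the doorbell/external side effects,
 * the wake-reason decode above is a small switch on the SRR1 field.
 * A C approximation (host_ipi_pending() is an assumed helper standing
 * in for the HSTATE_HOST_IPI test; the external case really defers to
 * kvmppc_read_intr and may return -1 or -2):
 *
 *	static int decode_wake_reason(unsigned int reason, int *trap)
 *	{
 *		*trap = 0;
 *		switch (reason) {
 *		case 6:                // decrementer
 *		case 5:                // privileged doorbell (P8)
 *			return 0;      // nothing to do
 *		case 3:                // hypervisor doorbell (P8)
 *			*trap = BOOK3S_INTERRUPT_H_DOORBELL;
 *			return host_ipi_pending() ? 1 : -1;
 *		case 0xa:              // hypervisor maintenance
 *			*trap = BOOK3S_INTERRUPT_HMI;
 *			return 1;
 *		case 8:                // external interrupt
 *			*trap = BOOK3S_INTERRUPT_EXTERNAL;
 *			return 1;      // real code: kvmppc_read_intr()
 *		default:
 *			return 1;      // anything else: host handles it
 *		}
 *	}
 */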

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 *      thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 *      thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/*
	 * It's possible that treclaim. may modify registers, if we have lost
	 * track of fake-suspend state in the guest due to it using rfscv.
	 * Save and restore registers in case this occurs.
	 */
	mfspr	r3, SPRN_DSCR
	mfspr	r4, SPRN_XER
	mfspr	r5, SPRN_AMR
	/* SPRN_TAR would need to be saved here if the kernel ever used it */
	mfcr	r12
	SAVE_NVGPRS(r1)
	SAVE_GPR(2, r1)
	SAVE_GPR(3, r1)
	SAVE_GPR(4, r1)
	SAVE_GPR(5, r1)
	stw	r12, 8(r1)
	std	r1, HSTATE_HOST_R1(r13)

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	GET_PACA(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	REST_GPR(2, r1)
	REST_GPR(3, r1)
	REST_GPR(4, r1)
	REST_GPR(5, r1)
	lwz	r12, 8(r1)
	REST_NVGPRS(r1)
	mtspr	SPRN_DSCR, r3
	mtspr	SPRN_XER, r4
	mtspr	SPRN_AMR, r5
	mtcr	r12
	HMT_MEDIUM

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * any registers.
	 */

BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, TM_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
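/*
 * With the P9 TM hypervisor assist, the restore path above never does
 * a trechkpt: it looks at MSR[TS] from the guest MSR and either
 * returns, arms fake-suspend mode, or emulates a rollback.  As a C
 * sketch (names are illustrative; 0 = non-transactional, 1 = suspended,
 * 2 = transactional):
 *
 *	static void restore_tm_decision(struct kvm_vcpu *vcpu,
 *					uint64_t guest_msr, uint64_t *texasr,
 *					bool *fake_suspend)
 *	{
 *		unsigned int ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *
 *		if (!ts)
 *			return;                // TM not active in guest
 *		*texasr |= TEXASR_FS;          // failure summary must be set
 *		if (ts == 1)
 *			*fake_suspend = true;  // suspended: fake-suspend mode
 *		else
 *			kvmhv_emulate_tm_rollback(vcpu);
 *	}
 */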

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_GPRS(3, 8, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_GPRS(9, 12, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
	mfctr	r4
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)	/* "regshere" marker */
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
	b	.

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
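/*
 * kvmppc_msr_interrupt above is equivalent to the following C sketch:
 * start from VCPU_INTR_MSR, and carry the old MSR[TS] field across,
 * except that a transactional context becomes suspended (as the
 * architecture requires on interrupt delivery).
 *
 *	static uint64_t msr_on_interrupt(uint64_t old_msr, uint64_t intr_msr)
 *	{
 *		uint64_t ts = (old_msr >> MSR_TS_S_LG) & 3;  // 0=N, 1=S, 2=T
 *
 *		if (ts == 2)
 *			ts = 1;        // transactional -> suspended
 *		intr_msr &= ~(3ULL << MSR_TS_S_LG);
 *		return intr_msr | (ts << MSR_TS_S_LG);
 *	}
 */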
/*
 * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
 *
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
kvmhv_load_guest_pmu:
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCRA(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 16(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCRS(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr

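/*
 * The CPU_FTR_PMAO_BUG test above (repeated in kvmhv_load_host_pmu
 * below) only calls kvmppc_fix_pmao when the saved MMCR0 records an
 * alert (PMAO set) without PMAO_SYNC, i.e. the one case where the
 * workaround has to re-arm a pending counter overflow.  In C:
 *
 *	static bool needs_pmao_fixup(uint64_t mmcr0)
 *	{
 *		return (mmcr0 & (MMCR0_PMAO_SYNC | MMCR0_PMAO)) == MMCR0_PMAO;
 *	}
 */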
/*
 * void kvmhv_load_host_pmu(void)
 *
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
kvmhv_load_host_pmu:
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
 *
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
kvmhv_save_guest_pmu:
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCRS(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
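/*
 * The save sequence above freezes through MMCR2 first (all-ones with
 * the low 10 bits clear covers the nine freeze-condition bits of each
 * of the six counters), then freezes and quiesces via MMCR0 and MMCRA,
 * and only does the full register save when the guest was actually
 * using the PMU.  A condensed C sketch, with mfspr/mtspr standing in
 * for the instructions:
 *
 *	static void save_guest_pmu_sketch(uint64_t *saved_mmcr0,
 *					  bool pmu_in_use)
 *	{
 *		mtspr(SPRN_MMCR2, ~0ULL << 10); // freeze via MMCR2 (P8 quirk)
 *		uint64_t mmcr0 = mfspr(SPRN_MMCR0);
 *		mtspr(SPRN_MMCR0, MMCR0_FC);    // freeze counters, no ints
 *		mtspr(SPRN_MMCRA, 0);           // stop SDAR updates
 *
 *		// if the guest wasn't using the PMU, just record "frozen"
 *		*saved_mmcr0 = pmu_in_use ? mmcr0 : MMCR0_FC;
 *		// the full-save path also stores PMC1-6, MMCR1/2, SIAR, SDAR
 *	}
 */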

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
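/*
 * kvmhv_accumulate_time above closes out the previous activity and
 * starts the new one.  Readers of the statistics rely on the seqcount
 * being odd while an update is in flight (bumped before and after,
 * with lwsync barriers).  A C sketch of the update side, with field
 * names mirroring the TAS_* offsets:
 *
 *	#include <stdint.h>
 *
 *	struct time_accum {
 *		uint64_t seqcount;     // odd while an update is in flight
 *		uint64_t total;
 *		uint64_t min;
 *		uint64_t max;
 *	};
 *
 *	static void accumulate(struct time_accum *tas, uint64_t delta)
 *	{
 *		int first = (tas->seqcount == 0);
 *
 *		tas->seqcount++;       // enter update (odd)
 *		__atomic_thread_fence(__ATOMIC_RELEASE);  // like lwsync
 *
 *		tas->total += delta;
 *		if (first || delta < tas->min)
 *			tas->min = delta;
 *		if (delta > tas->max)
 *			tas->max = delta;
 *
 *		__atomic_thread_fence(__ATOMIC_RELEASE);
 *		tas->seqcount++;       // update done (even)
 *	}
 */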