1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * 4 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 5 * 6 * Derived from book3s_rmhandlers.S and other files, which are: 7 * 8 * Copyright SUSE Linux Products GmbH 2009 9 * 10 * Authors: Alexander Graf <agraf@suse.de> 11 */ 12 13#include <asm/ppc_asm.h> 14#include <asm/kvm_asm.h> 15#include <asm/reg.h> 16#include <asm/mmu.h> 17#include <asm/page.h> 18#include <asm/ptrace.h> 19#include <asm/hvcall.h> 20#include <asm/asm-offsets.h> 21#include <asm/exception-64s.h> 22#include <asm/kvm_book3s_asm.h> 23#include <asm/book3s/64/mmu-hash.h> 24#include <asm/export.h> 25#include <asm/tm.h> 26#include <asm/opal.h> 27#include <asm/xive-regs.h> 28#include <asm/thread_info.h> 29#include <asm/asm-compat.h> 30#include <asm/feature-fixups.h> 31#include <asm/cpuidle.h> 32 33/* Sign-extend HDEC if not on POWER9 */ 34#define EXTEND_HDEC(reg) \ 35BEGIN_FTR_SECTION; \ 36 extsw reg, reg; \ 37END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 38 39/* Values in HSTATE_NAPPING(r13) */ 40#define NAPPING_CEDE 1 41#define NAPPING_NOVCPU 2 42#define NAPPING_UNSPLIT 3 43 44/* Stack frame offsets for kvmppc_hv_entry */ 45#define SFS 208 46#define STACK_SLOT_TRAP (SFS-4) 47#define STACK_SLOT_SHORT_PATH (SFS-8) 48#define STACK_SLOT_TID (SFS-16) 49#define STACK_SLOT_PSSCR (SFS-24) 50#define STACK_SLOT_PID (SFS-32) 51#define STACK_SLOT_IAMR (SFS-40) 52#define STACK_SLOT_CIABR (SFS-48) 53#define STACK_SLOT_DAWR (SFS-56) 54#define STACK_SLOT_DAWRX (SFS-64) 55#define STACK_SLOT_HFSCR (SFS-72) 56#define STACK_SLOT_AMR (SFS-80) 57#define STACK_SLOT_UAMOR (SFS-88) 58/* the following is used by the P9 short path */ 59#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ 60 61/* 62 * Call kvmppc_hv_entry in real mode. 63 * Must be called with interrupts hard-disabled. 64 * 65 * Input Registers: 66 * 67 * LR = return address to continue at after eventually re-enabling MMU 68 */ 69_GLOBAL_TOC(kvmppc_hv_entry_trampoline) 70 mflr r0 71 std r0, PPC_LR_STKOFF(r1) 72 stdu r1, -112(r1) 73 mfmsr r10 74 std r10, HSTATE_HOST_MSR(r13) 75 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) 76 li r0,MSR_RI 77 andc r0,r10,r0 78 li r6,MSR_IR | MSR_DR 79 andc r6,r10,r6 80 mtmsrd r0,1 /* clear RI in MSR */ 81 mtsrr0 r5 82 mtsrr1 r6 83 RFI_TO_KERNEL 84 85kvmppc_call_hv_entry: 86BEGIN_FTR_SECTION 87 /* On P9, do LPCR setting, if necessary */ 88 ld r3, HSTATE_SPLIT_MODE(r13) 89 cmpdi r3, 0 90 beq 46f 91 lwz r4, KVM_SPLIT_DO_SET(r3) 92 cmpwi r4, 0 93 beq 46f 94 bl kvmhv_p9_set_lpcr 95 nop 9646: 97END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 98 99 ld r4, HSTATE_KVM_VCPU(r13) 100 bl kvmppc_hv_entry 101 102 /* Back from guest - restore host state and return to caller */ 103 104BEGIN_FTR_SECTION 105 /* Restore host DABR and DABRX */ 106 ld r5,HSTATE_DABR(r13) 107 li r6,7 108 mtspr SPRN_DABR,r5 109 mtspr SPRN_DABRX,r6 110END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 111 112 /* Restore SPRG3 */ 113 ld r3,PACA_SPRG_VDSO(r13) 114 mtspr SPRN_SPRG_VDSO_WRITE,r3 115 116 /* Reload the host's PMU registers */ 117 bl kvmhv_load_host_pmu 118 119 /* 120 * Reload DEC. HDEC interrupts were disabled when 121 * we reloaded the host's LPCR value. 122 */ 123 ld r3, HSTATE_DECEXP(r13) 124 mftb r4 125 subf r4, r4, r3 126 mtspr SPRN_DEC, r4 127 128 /* hwthread_req may have got set by cede or no vcpu, so clear it */ 129 li r0, 0 130 stb r0, HSTATE_HWTHREAD_REQ(r13) 131 132 /* 133 * For external interrupts we need to call the Linux 134 * handler to process the interrupt. We do that by jumping 135 * to absolute address 0x500 for external interrupts. 136 * The [h]rfid at the end of the handler will return to 137 * the book3s_hv_interrupts.S code. For other interrupts 138 * we do the rfid to get back to the book3s_hv_interrupts.S 139 * code here. 140 */ 141 ld r8, 112+PPC_LR_STKOFF(r1) 142 addi r1, r1, 112 143 ld r7, HSTATE_HOST_MSR(r13) 144 145 /* Return the trap number on this thread as the return value */ 146 mr r3, r12 147 148 /* 149 * If we came back from the guest via a relocation-on interrupt, 150 * we will be in virtual mode at this point, which makes it a 151 * little easier to get back to the caller. 152 */ 153 mfmsr r0 154 andi. r0, r0, MSR_IR /* in real mode? */ 155 bne .Lvirt_return 156 157 /* RFI into the highmem handler */ 158 mfmsr r6 159 li r0, MSR_RI 160 andc r6, r6, r0 161 mtmsrd r6, 1 /* Clear RI in MSR */ 162 mtsrr0 r8 163 mtsrr1 r7 164 RFI_TO_KERNEL 165 166 /* Virtual-mode return */ 167.Lvirt_return: 168 mtlr r8 169 blr 170 171kvmppc_primary_no_guest: 172 /* We handle this much like a ceded vcpu */ 173 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ 174 /* HDEC may be larger than DEC for arch >= v3.00, but since the */ 175 /* HDEC value came from DEC in the first place, it will fit */ 176 mfspr r3, SPRN_HDEC 177 mtspr SPRN_DEC, r3 178 /* 179 * Make sure the primary has finished the MMU switch. 180 * We should never get here on a secondary thread, but 181 * check it for robustness' sake. 182 */ 183 ld r5, HSTATE_KVM_VCORE(r13) 18465: lbz r0, VCORE_IN_GUEST(r5) 185 cmpwi r0, 0 186 beq 65b 187 /* Set LPCR. */ 188 ld r8,VCORE_LPCR(r5) 189 mtspr SPRN_LPCR,r8 190 isync 191 /* set our bit in napping_threads */ 192 ld r5, HSTATE_KVM_VCORE(r13) 193 lbz r7, HSTATE_PTID(r13) 194 li r0, 1 195 sld r0, r0, r7 196 addi r6, r5, VCORE_NAPPING_THREADS 1971: lwarx r3, 0, r6 198 or r3, r3, r0 199 stwcx. r3, 0, r6 200 bne 1b 201 /* order napping_threads update vs testing entry_exit_map */ 202 isync 203 li r12, 0 204 lwz r7, VCORE_ENTRY_EXIT(r5) 205 cmpwi r7, 0x100 206 bge kvm_novcpu_exit /* another thread already exiting */ 207 li r3, NAPPING_NOVCPU 208 stb r3, HSTATE_NAPPING(r13) 209 210 li r3, 0 /* Don't wake on privileged (OS) doorbell */ 211 b kvm_do_nap 212 213/* 214 * kvm_novcpu_wakeup 215 * Entered from kvm_start_guest if kvm_hstate.napping is set 216 * to NAPPING_NOVCPU 217 * r2 = kernel TOC 218 * r13 = paca 219 */ 220kvm_novcpu_wakeup: 221 ld r1, HSTATE_HOST_R1(r13) 222 ld r5, HSTATE_KVM_VCORE(r13) 223 li r0, 0 224 stb r0, HSTATE_NAPPING(r13) 225 226 /* check the wake reason */ 227 bl kvmppc_check_wake_reason 228 229 /* 230 * Restore volatile registers since we could have called 231 * a C routine in kvmppc_check_wake_reason. 232 * r5 = VCORE 233 */ 234 ld r5, HSTATE_KVM_VCORE(r13) 235 236 /* see if any other thread is already exiting */ 237 lwz r0, VCORE_ENTRY_EXIT(r5) 238 cmpwi r0, 0x100 239 bge kvm_novcpu_exit 240 241 /* clear our bit in napping_threads */ 242 lbz r7, HSTATE_PTID(r13) 243 li r0, 1 244 sld r0, r0, r7 245 addi r6, r5, VCORE_NAPPING_THREADS 2464: lwarx r7, 0, r6 247 andc r7, r7, r0 248 stwcx. r7, 0, r6 249 bne 4b 250 251 /* See if the wake reason means we need to exit */ 252 cmpdi r3, 0 253 bge kvm_novcpu_exit 254 255 /* See if our timeslice has expired (HDEC is negative) */ 256 mfspr r0, SPRN_HDEC 257 EXTEND_HDEC(r0) 258 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 259 cmpdi r0, 0 260 blt kvm_novcpu_exit 261 262 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 263 ld r4, HSTATE_KVM_VCPU(r13) 264 cmpdi r4, 0 265 beq kvmppc_primary_no_guest 266 267#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 268 addi r3, r4, VCPU_TB_RMENTRY 269 bl kvmhv_start_timing 270#endif 271 b kvmppc_got_guest 272 273kvm_novcpu_exit: 274#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 275 ld r4, HSTATE_KVM_VCPU(r13) 276 cmpdi r4, 0 277 beq 13f 278 addi r3, r4, VCPU_TB_RMEXIT 279 bl kvmhv_accumulate_time 280#endif 28113: mr r3, r12 282 stw r12, STACK_SLOT_TRAP(r1) 283 bl kvmhv_commence_exit 284 nop 285 b kvmhv_switch_to_host 286 287/* 288 * We come in here when wakened from Linux offline idle code. 289 * Relocation is off 290 * r3 contains the SRR1 wakeup value, SRR1 is trashed. 291 */ 292_GLOBAL(idle_kvm_start_guest) 293 ld r4,PACAEMERGSP(r13) 294 mfcr r5 295 mflr r0 296 std r1,0(r4) 297 std r5,8(r4) 298 std r0,16(r4) 299 subi r1,r4,STACK_FRAME_OVERHEAD 300 SAVE_NVGPRS(r1) 301 302 /* 303 * Could avoid this and pass it through in r3. For now, 304 * code expects it to be in SRR1. 305 */ 306 mtspr SPRN_SRR1,r3 307 308 li r0,0 309 stb r0,PACA_FTRACE_ENABLED(r13) 310 311 li r0,KVM_HWTHREAD_IN_KVM 312 stb r0,HSTATE_HWTHREAD_STATE(r13) 313 314 /* kvm cede / napping does not come through here */ 315 lbz r0,HSTATE_NAPPING(r13) 316 twnei r0,0 317 318 b 1f 319 320kvm_unsplit_wakeup: 321 li r0, 0 322 stb r0, HSTATE_NAPPING(r13) 323 3241: 325 326 /* 327 * We weren't napping due to cede, so this must be a secondary 328 * thread being woken up to run a guest, or being woken up due 329 * to a stray IPI. (Or due to some machine check or hypervisor 330 * maintenance interrupt while the core is in KVM.) 331 */ 332 333 /* Check the wake reason in SRR1 to see why we got here */ 334 bl kvmppc_check_wake_reason 335 /* 336 * kvmppc_check_wake_reason could invoke a C routine, but we 337 * have no volatile registers to restore when we return. 338 */ 339 340 cmpdi r3, 0 341 bge kvm_no_guest 342 343 /* get vcore pointer, NULL if we have nothing to run */ 344 ld r5,HSTATE_KVM_VCORE(r13) 345 cmpdi r5,0 346 /* if we have no vcore to run, go back to sleep */ 347 beq kvm_no_guest 348 349kvm_secondary_got_guest: 350 351 /* Set HSTATE_DSCR(r13) to something sensible */ 352 ld r6, PACA_DSCR_DEFAULT(r13) 353 std r6, HSTATE_DSCR(r13) 354 355 /* On thread 0 of a subcore, set HDEC to max */ 356 lbz r4, HSTATE_PTID(r13) 357 cmpwi r4, 0 358 bne 63f 359 LOAD_REG_ADDR(r6, decrementer_max) 360 ld r6, 0(r6) 361 mtspr SPRN_HDEC, r6 362 /* and set per-LPAR registers, if doing dynamic micro-threading */ 363 ld r6, HSTATE_SPLIT_MODE(r13) 364 cmpdi r6, 0 365 beq 63f 366BEGIN_FTR_SECTION 367 ld r0, KVM_SPLIT_RPR(r6) 368 mtspr SPRN_RPR, r0 369 ld r0, KVM_SPLIT_PMMAR(r6) 370 mtspr SPRN_PMMAR, r0 371 ld r0, KVM_SPLIT_LDBAR(r6) 372 mtspr SPRN_LDBAR, r0 373 isync 374FTR_SECTION_ELSE 375 /* On P9 we use the split_info for coordinating LPCR changes */ 376 lwz r4, KVM_SPLIT_DO_SET(r6) 377 cmpwi r4, 0 378 beq 1f 379 mr r3, r6 380 bl kvmhv_p9_set_lpcr 381 nop 3821: 383ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 38463: 385 /* Order load of vcpu after load of vcore */ 386 lwsync 387 ld r4, HSTATE_KVM_VCPU(r13) 388 bl kvmppc_hv_entry 389 390 /* Back from the guest, go back to nap */ 391 /* Clear our vcpu and vcore pointers so we don't come back in early */ 392 li r0, 0 393 std r0, HSTATE_KVM_VCPU(r13) 394 /* 395 * Once we clear HSTATE_KVM_VCORE(r13), the code in 396 * kvmppc_run_core() is going to assume that all our vcpu 397 * state is visible in memory. This lwsync makes sure 398 * that that is true. 399 */ 400 lwsync 401 std r0, HSTATE_KVM_VCORE(r13) 402 403 /* 404 * All secondaries exiting guest will fall through this path. 405 * Before proceeding, just check for HMI interrupt and 406 * invoke opal hmi handler. By now we are sure that the 407 * primary thread on this core/subcore has already made partition 408 * switch/TB resync and we are good to call opal hmi handler. 409 */ 410 cmpwi r12, BOOK3S_INTERRUPT_HMI 411 bne kvm_no_guest 412 413 li r3,0 /* NULL argument */ 414 bl hmi_exception_realmode 415/* 416 * At this point we have finished executing in the guest. 417 * We need to wait for hwthread_req to become zero, since 418 * we may not turn on the MMU while hwthread_req is non-zero. 419 * While waiting we also need to check if we get given a vcpu to run. 420 */ 421kvm_no_guest: 422 lbz r3, HSTATE_HWTHREAD_REQ(r13) 423 cmpwi r3, 0 424 bne 53f 425 HMT_MEDIUM 426 li r0, KVM_HWTHREAD_IN_KERNEL 427 stb r0, HSTATE_HWTHREAD_STATE(r13) 428 /* need to recheck hwthread_req after a barrier, to avoid race */ 429 sync 430 lbz r3, HSTATE_HWTHREAD_REQ(r13) 431 cmpwi r3, 0 432 bne 54f 433 434 /* 435 * Jump to idle_return_gpr_loss, which returns to the 436 * idle_kvm_start_guest caller. 437 */ 438 li r3, LPCR_PECE0 439 mfspr r4, SPRN_LPCR 440 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 441 mtspr SPRN_LPCR, r4 442 /* set up r3 for return */ 443 mfspr r3,SPRN_SRR1 444 REST_NVGPRS(r1) 445 addi r1, r1, STACK_FRAME_OVERHEAD 446 ld r0, 16(r1) 447 ld r5, 8(r1) 448 ld r1, 0(r1) 449 mtlr r0 450 mtcr r5 451 blr 452 45353: HMT_LOW 454 ld r5, HSTATE_KVM_VCORE(r13) 455 cmpdi r5, 0 456 bne 60f 457 ld r3, HSTATE_SPLIT_MODE(r13) 458 cmpdi r3, 0 459 beq kvm_no_guest 460 lwz r0, KVM_SPLIT_DO_SET(r3) 461 cmpwi r0, 0 462 bne kvmhv_do_set 463 lwz r0, KVM_SPLIT_DO_RESTORE(r3) 464 cmpwi r0, 0 465 bne kvmhv_do_restore 466 lbz r0, KVM_SPLIT_DO_NAP(r3) 467 cmpwi r0, 0 468 beq kvm_no_guest 469 HMT_MEDIUM 470 b kvm_unsplit_nap 47160: HMT_MEDIUM 472 b kvm_secondary_got_guest 473 47454: li r0, KVM_HWTHREAD_IN_KVM 475 stb r0, HSTATE_HWTHREAD_STATE(r13) 476 b kvm_no_guest 477 478kvmhv_do_set: 479 /* Set LPCR, LPIDR etc. on P9 */ 480 HMT_MEDIUM 481 bl kvmhv_p9_set_lpcr 482 nop 483 b kvm_no_guest 484 485kvmhv_do_restore: 486 HMT_MEDIUM 487 bl kvmhv_p9_restore_lpcr 488 nop 489 b kvm_no_guest 490 491/* 492 * Here the primary thread is trying to return the core to 493 * whole-core mode, so we need to nap. 494 */ 495kvm_unsplit_nap: 496 /* 497 * When secondaries are napping in kvm_unsplit_nap() with 498 * hwthread_req = 1, HMI goes ignored even though subcores are 499 * already exited the guest. Hence HMI keeps waking up secondaries 500 * from nap in a loop and secondaries always go back to nap since 501 * no vcore is assigned to them. This makes impossible for primary 502 * thread to get hold of secondary threads resulting into a soft 503 * lockup in KVM path. 504 * 505 * Let us check if HMI is pending and handle it before we go to nap. 506 */ 507 cmpwi r12, BOOK3S_INTERRUPT_HMI 508 bne 55f 509 li r3, 0 /* NULL argument */ 510 bl hmi_exception_realmode 51155: 512 /* 513 * Ensure that secondary doesn't nap when it has 514 * its vcore pointer set. 515 */ 516 sync /* matches smp_mb() before setting split_info.do_nap */ 517 ld r0, HSTATE_KVM_VCORE(r13) 518 cmpdi r0, 0 519 bne kvm_no_guest 520 /* clear any pending message */ 521BEGIN_FTR_SECTION 522 lis r6, (PPC_DBELL_SERVER << (63-36))@h 523 PPC_MSGCLR(6) 524END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 525 /* Set kvm_split_mode.napped[tid] = 1 */ 526 ld r3, HSTATE_SPLIT_MODE(r13) 527 li r0, 1 528 lbz r4, HSTATE_TID(r13) 529 addi r4, r4, KVM_SPLIT_NAPPED 530 stbx r0, r3, r4 531 /* Check the do_nap flag again after setting napped[] */ 532 sync 533 lbz r0, KVM_SPLIT_DO_NAP(r3) 534 cmpwi r0, 0 535 beq 57f 536 li r3, NAPPING_UNSPLIT 537 stb r3, HSTATE_NAPPING(r13) 538 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 539 mfspr r5, SPRN_LPCR 540 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) 541 b kvm_nap_sequence 542 54357: li r0, 0 544 stbx r0, r3, r4 545 b kvm_no_guest 546 547/****************************************************************************** 548 * * 549 * Entry code * 550 * * 551 *****************************************************************************/ 552 553.global kvmppc_hv_entry 554kvmppc_hv_entry: 555 556 /* Required state: 557 * 558 * R4 = vcpu pointer (or NULL) 559 * MSR = ~IR|DR 560 * R13 = PACA 561 * R1 = host R1 562 * R2 = TOC 563 * all other volatile GPRS = free 564 * Does not preserve non-volatile GPRs or CR fields 565 */ 566 mflr r0 567 std r0, PPC_LR_STKOFF(r1) 568 stdu r1, -SFS(r1) 569 570 /* Save R1 in the PACA */ 571 std r1, HSTATE_HOST_R1(r13) 572 573 li r6, KVM_GUEST_MODE_HOST_HV 574 stb r6, HSTATE_IN_GUEST(r13) 575 576#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 577 /* Store initial timestamp */ 578 cmpdi r4, 0 579 beq 1f 580 addi r3, r4, VCPU_TB_RMENTRY 581 bl kvmhv_start_timing 5821: 583#endif 584 585 ld r5, HSTATE_KVM_VCORE(r13) 586 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ 587 588 /* 589 * POWER7/POWER8 host -> guest partition switch code. 590 * We don't have to lock against concurrent tlbies, 591 * but we do have to coordinate across hardware threads. 592 */ 593 /* Set bit in entry map iff exit map is zero. */ 594 li r7, 1 595 lbz r6, HSTATE_PTID(r13) 596 sld r7, r7, r6 597 addi r8, r5, VCORE_ENTRY_EXIT 59821: lwarx r3, 0, r8 599 cmpwi r3, 0x100 /* any threads starting to exit? */ 600 bge secondary_too_late /* if so we're too late to the party */ 601 or r3, r3, r7 602 stwcx. r3, 0, r8 603 bne 21b 604 605 /* Primary thread switches to guest partition. */ 606 cmpwi r6,0 607 bne 10f 608 609 lwz r7,KVM_LPID(r9) 610BEGIN_FTR_SECTION 611 ld r6,KVM_SDR1(r9) 612 li r0,LPID_RSVD /* switch to reserved LPID */ 613 mtspr SPRN_LPID,r0 614 ptesync 615 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 616END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 617 mtspr SPRN_LPID,r7 618 isync 619 620 /* See if we need to flush the TLB. */ 621 mr r3, r9 /* kvm pointer */ 622 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ 623 li r5, 0 /* nested vcpu pointer */ 624 bl kvmppc_check_need_tlb_flush 625 nop 626 ld r5, HSTATE_KVM_VCORE(r13) 627 628 /* Add timebase offset onto timebase */ 62922: ld r8,VCORE_TB_OFFSET(r5) 630 cmpdi r8,0 631 beq 37f 632 std r8, VCORE_TB_OFFSET_APPL(r5) 633 mftb r6 /* current host timebase */ 634 add r8,r8,r6 635 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 636 mftb r7 /* check if lower 24 bits overflowed */ 637 clrldi r6,r6,40 638 clrldi r7,r7,40 639 cmpld r7,r6 640 bge 37f 641 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 642 mtspr SPRN_TBU40,r8 643 644 /* Load guest PCR value to select appropriate compat mode */ 64537: ld r7, VCORE_PCR(r5) 646 cmpdi r7, 0 647 beq 38f 648 mtspr SPRN_PCR, r7 64938: 650 651BEGIN_FTR_SECTION 652 /* DPDES and VTB are shared between threads */ 653 ld r8, VCORE_DPDES(r5) 654 ld r7, VCORE_VTB(r5) 655 mtspr SPRN_DPDES, r8 656 mtspr SPRN_VTB, r7 657END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 658 659 /* Mark the subcore state as inside guest */ 660 bl kvmppc_subcore_enter_guest 661 nop 662 ld r5, HSTATE_KVM_VCORE(r13) 663 ld r4, HSTATE_KVM_VCPU(r13) 664 li r0,1 665 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 666 667 /* Do we have a guest vcpu to run? */ 66810: cmpdi r4, 0 669 beq kvmppc_primary_no_guest 670kvmppc_got_guest: 671 /* Increment yield count if they have a VPA */ 672 ld r3, VCPU_VPA(r4) 673 cmpdi r3, 0 674 beq 25f 675 li r6, LPPACA_YIELDCOUNT 676 LWZX_BE r5, r3, r6 677 addi r5, r5, 1 678 STWX_BE r5, r3, r6 679 li r6, 1 680 stb r6, VCPU_VPA_DIRTY(r4) 68125: 682 683 /* Save purr/spurr */ 684 mfspr r5,SPRN_PURR 685 mfspr r6,SPRN_SPURR 686 std r5,HSTATE_PURR(r13) 687 std r6,HSTATE_SPURR(r13) 688 ld r7,VCPU_PURR(r4) 689 ld r8,VCPU_SPURR(r4) 690 mtspr SPRN_PURR,r7 691 mtspr SPRN_SPURR,r8 692 693 /* Save host values of some registers */ 694BEGIN_FTR_SECTION 695 mfspr r5, SPRN_TIDR 696 mfspr r6, SPRN_PSSCR 697 mfspr r7, SPRN_PID 698 std r5, STACK_SLOT_TID(r1) 699 std r6, STACK_SLOT_PSSCR(r1) 700 std r7, STACK_SLOT_PID(r1) 701 mfspr r5, SPRN_HFSCR 702 std r5, STACK_SLOT_HFSCR(r1) 703END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 704BEGIN_FTR_SECTION 705 mfspr r5, SPRN_CIABR 706 mfspr r6, SPRN_DAWR 707 mfspr r7, SPRN_DAWRX 708 mfspr r8, SPRN_IAMR 709 std r5, STACK_SLOT_CIABR(r1) 710 std r6, STACK_SLOT_DAWR(r1) 711 std r7, STACK_SLOT_DAWRX(r1) 712 std r8, STACK_SLOT_IAMR(r1) 713END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 714 715 mfspr r5, SPRN_AMR 716 std r5, STACK_SLOT_AMR(r1) 717 mfspr r6, SPRN_UAMOR 718 std r6, STACK_SLOT_UAMOR(r1) 719 720BEGIN_FTR_SECTION 721 /* Set partition DABR */ 722 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 723 lwz r5,VCPU_DABRX(r4) 724 ld r6,VCPU_DABR(r4) 725 mtspr SPRN_DABRX,r5 726 mtspr SPRN_DABR,r6 727 isync 728END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 729 730#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 731/* 732 * Branch around the call if both CPU_FTR_TM and 733 * CPU_FTR_P9_TM_HV_ASSIST are off. 734 */ 735BEGIN_FTR_SECTION 736 b 91f 737END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 738 /* 739 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 740 */ 741 mr r3, r4 742 ld r4, VCPU_MSR(r3) 743 li r5, 0 /* don't preserve non-vol regs */ 744 bl kvmppc_restore_tm_hv 745 nop 746 ld r4, HSTATE_KVM_VCPU(r13) 74791: 748#endif 749 750 /* Load guest PMU registers; r4 = vcpu pointer here */ 751 mr r3, r4 752 bl kvmhv_load_guest_pmu 753 754 /* Load up FP, VMX and VSX registers */ 755 ld r4, HSTATE_KVM_VCPU(r13) 756 bl kvmppc_load_fp 757 758 ld r14, VCPU_GPR(R14)(r4) 759 ld r15, VCPU_GPR(R15)(r4) 760 ld r16, VCPU_GPR(R16)(r4) 761 ld r17, VCPU_GPR(R17)(r4) 762 ld r18, VCPU_GPR(R18)(r4) 763 ld r19, VCPU_GPR(R19)(r4) 764 ld r20, VCPU_GPR(R20)(r4) 765 ld r21, VCPU_GPR(R21)(r4) 766 ld r22, VCPU_GPR(R22)(r4) 767 ld r23, VCPU_GPR(R23)(r4) 768 ld r24, VCPU_GPR(R24)(r4) 769 ld r25, VCPU_GPR(R25)(r4) 770 ld r26, VCPU_GPR(R26)(r4) 771 ld r27, VCPU_GPR(R27)(r4) 772 ld r28, VCPU_GPR(R28)(r4) 773 ld r29, VCPU_GPR(R29)(r4) 774 ld r30, VCPU_GPR(R30)(r4) 775 ld r31, VCPU_GPR(R31)(r4) 776 777 /* Switch DSCR to guest value */ 778 ld r5, VCPU_DSCR(r4) 779 mtspr SPRN_DSCR, r5 780 781BEGIN_FTR_SECTION 782 /* Skip next section on POWER7 */ 783 b 8f 784END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 785 /* Load up POWER8-specific registers */ 786 ld r5, VCPU_IAMR(r4) 787 lwz r6, VCPU_PSPB(r4) 788 ld r7, VCPU_FSCR(r4) 789 mtspr SPRN_IAMR, r5 790 mtspr SPRN_PSPB, r6 791 mtspr SPRN_FSCR, r7 792 /* 793 * Handle broken DAWR case by not writing it. This means we 794 * can still store the DAWR register for migration. 795 */ 796 LOAD_REG_ADDR(r5, dawr_force_enable) 797 lbz r5, 0(r5) 798 cmpdi r5, 0 799 beq 1f 800 ld r5, VCPU_DAWR(r4) 801 ld r6, VCPU_DAWRX(r4) 802 mtspr SPRN_DAWR, r5 803 mtspr SPRN_DAWRX, r6 8041: 805 ld r7, VCPU_CIABR(r4) 806 ld r8, VCPU_TAR(r4) 807 mtspr SPRN_CIABR, r7 808 mtspr SPRN_TAR, r8 809 ld r5, VCPU_IC(r4) 810 ld r8, VCPU_EBBHR(r4) 811 mtspr SPRN_IC, r5 812 mtspr SPRN_EBBHR, r8 813 ld r5, VCPU_EBBRR(r4) 814 ld r6, VCPU_BESCR(r4) 815 lwz r7, VCPU_GUEST_PID(r4) 816 ld r8, VCPU_WORT(r4) 817 mtspr SPRN_EBBRR, r5 818 mtspr SPRN_BESCR, r6 819 mtspr SPRN_PID, r7 820 mtspr SPRN_WORT, r8 821BEGIN_FTR_SECTION 822 /* POWER8-only registers */ 823 ld r5, VCPU_TCSCR(r4) 824 ld r6, VCPU_ACOP(r4) 825 ld r7, VCPU_CSIGR(r4) 826 ld r8, VCPU_TACR(r4) 827 mtspr SPRN_TCSCR, r5 828 mtspr SPRN_ACOP, r6 829 mtspr SPRN_CSIGR, r7 830 mtspr SPRN_TACR, r8 831 nop 832FTR_SECTION_ELSE 833 /* POWER9-only registers */ 834 ld r5, VCPU_TID(r4) 835 ld r6, VCPU_PSSCR(r4) 836 lbz r8, HSTATE_FAKE_SUSPEND(r13) 837 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */ 838 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG 839 ld r7, VCPU_HFSCR(r4) 840 mtspr SPRN_TIDR, r5 841 mtspr SPRN_PSSCR, r6 842 mtspr SPRN_HFSCR, r7 843ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 8448: 845 846 ld r5, VCPU_SPRG0(r4) 847 ld r6, VCPU_SPRG1(r4) 848 ld r7, VCPU_SPRG2(r4) 849 ld r8, VCPU_SPRG3(r4) 850 mtspr SPRN_SPRG0, r5 851 mtspr SPRN_SPRG1, r6 852 mtspr SPRN_SPRG2, r7 853 mtspr SPRN_SPRG3, r8 854 855 /* Load up DAR and DSISR */ 856 ld r5, VCPU_DAR(r4) 857 lwz r6, VCPU_DSISR(r4) 858 mtspr SPRN_DAR, r5 859 mtspr SPRN_DSISR, r6 860 861 /* Restore AMR and UAMOR, set AMOR to all 1s */ 862 ld r5,VCPU_AMR(r4) 863 ld r6,VCPU_UAMOR(r4) 864 li r7,-1 865 mtspr SPRN_AMR,r5 866 mtspr SPRN_UAMOR,r6 867 mtspr SPRN_AMOR,r7 868 869 /* Restore state of CTRL run bit; assume 1 on entry */ 870 lwz r5,VCPU_CTRL(r4) 871 andi. r5,r5,1 872 bne 4f 873 mfspr r6,SPRN_CTRLF 874 clrrdi r6,r6,1 875 mtspr SPRN_CTRLT,r6 8764: 877 /* Secondary threads wait for primary to have done partition switch */ 878 ld r5, HSTATE_KVM_VCORE(r13) 879 lbz r6, HSTATE_PTID(r13) 880 cmpwi r6, 0 881 beq 21f 882 lbz r0, VCORE_IN_GUEST(r5) 883 cmpwi r0, 0 884 bne 21f 885 HMT_LOW 88620: lwz r3, VCORE_ENTRY_EXIT(r5) 887 cmpwi r3, 0x100 888 bge no_switch_exit 889 lbz r0, VCORE_IN_GUEST(r5) 890 cmpwi r0, 0 891 beq 20b 892 HMT_MEDIUM 89321: 894 /* Set LPCR. */ 895 ld r8,VCORE_LPCR(r5) 896 mtspr SPRN_LPCR,r8 897 isync 898 899 /* 900 * Set the decrementer to the guest decrementer. 901 */ 902 ld r8,VCPU_DEC_EXPIRES(r4) 903 /* r8 is a host timebase value here, convert to guest TB */ 904 ld r5,HSTATE_KVM_VCORE(r13) 905 ld r6,VCORE_TB_OFFSET_APPL(r5) 906 add r8,r8,r6 907 mftb r7 908 subf r3,r7,r8 909 mtspr SPRN_DEC,r3 910 911 /* Check if HDEC expires soon */ 912 mfspr r3, SPRN_HDEC 913 EXTEND_HDEC(r3) 914 cmpdi r3, 512 /* 1 microsecond */ 915 blt hdec_soon 916 917 /* For hash guest, clear out and reload the SLB */ 918 ld r6, VCPU_KVM(r4) 919 lbz r0, KVM_RADIX(r6) 920 cmpwi r0, 0 921 bne 9f 922 li r6, 0 923 slbmte r6, r6 924 slbia 925 ptesync 926 927 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ 928 lwz r5,VCPU_SLB_MAX(r4) 929 cmpwi r5,0 930 beq 9f 931 mtctr r5 932 addi r6,r4,VCPU_SLB 9331: ld r8,VCPU_SLB_E(r6) 934 ld r9,VCPU_SLB_V(r6) 935 slbmte r9,r8 936 addi r6,r6,VCPU_SLB_SIZE 937 bdnz 1b 9389: 939 940#ifdef CONFIG_KVM_XICS 941 /* We are entering the guest on that thread, push VCPU to XIVE */ 942 ld r11, VCPU_XIVE_SAVED_STATE(r4) 943 li r9, TM_QW1_OS 944 lwz r8, VCPU_XIVE_CAM_WORD(r4) 945 li r7, TM_QW1_OS + TM_WORD2 946 mfmsr r0 947 andi. r0, r0, MSR_DR /* in real mode? */ 948 beq 2f 949 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 950 cmpldi cr1, r10, 0 951 beq cr1, no_xive 952 eieio 953 stdx r11,r9,r10 954 stwx r8,r7,r10 955 b 3f 9562: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 957 cmpldi cr1, r10, 0 958 beq cr1, no_xive 959 eieio 960 stdcix r11,r9,r10 961 stwcix r8,r7,r10 9623: li r9, 1 963 stb r9, VCPU_XIVE_PUSHED(r4) 964 eieio 965 966 /* 967 * We clear the irq_pending flag. There is a small chance of a 968 * race vs. the escalation interrupt happening on another 969 * processor setting it again, but the only consequence is to 970 * cause a spurrious wakeup on the next H_CEDE which is not an 971 * issue. 972 */ 973 li r0,0 974 stb r0, VCPU_IRQ_PENDING(r4) 975 976 /* 977 * In single escalation mode, if the escalation interrupt is 978 * on, we mask it. 979 */ 980 lbz r0, VCPU_XIVE_ESC_ON(r4) 981 cmpwi cr1, r0,0 982 beq cr1, 1f 983 li r9, XIVE_ESB_SET_PQ_01 984 beq 4f /* in real mode? */ 985 ld r10, VCPU_XIVE_ESC_VADDR(r4) 986 ldx r0, r10, r9 987 b 5f 9884: ld r10, VCPU_XIVE_ESC_RADDR(r4) 989 ldcix r0, r10, r9 9905: sync 991 992 /* We have a possible subtle race here: The escalation interrupt might 993 * have fired and be on its way to the host queue while we mask it, 994 * and if we unmask it early enough (re-cede right away), there is 995 * a theorical possibility that it fires again, thus landing in the 996 * target queue more than once which is a big no-no. 997 * 998 * Fortunately, solving this is rather easy. If the above load setting 999 * PQ to 01 returns a previous value where P is set, then we know the 1000 * escalation interrupt is somewhere on its way to the host. In that 1001 * case we simply don't clear the xive_esc_on flag below. It will be 1002 * eventually cleared by the handler for the escalation interrupt. 1003 * 1004 * Then, when doing a cede, we check that flag again before re-enabling 1005 * the escalation interrupt, and if set, we abort the cede. 1006 */ 1007 andi. r0, r0, XIVE_ESB_VAL_P 1008 bne- 1f 1009 1010 /* Now P is 0, we can clear the flag */ 1011 li r0, 0 1012 stb r0, VCPU_XIVE_ESC_ON(r4) 10131: 1014no_xive: 1015#endif /* CONFIG_KVM_XICS */ 1016 1017 li r0, 0 1018 stw r0, STACK_SLOT_SHORT_PATH(r1) 1019 1020deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ 1021 /* Check if we can deliver an external or decrementer interrupt now */ 1022 ld r0, VCPU_PENDING_EXC(r4) 1023BEGIN_FTR_SECTION 1024 /* On POWER9, also check for emulated doorbell interrupt */ 1025 lbz r3, VCPU_DBELL_REQ(r4) 1026 or r0, r0, r3 1027END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1028 cmpdi r0, 0 1029 beq 71f 1030 mr r3, r4 1031 bl kvmppc_guest_entry_inject_int 1032 ld r4, HSTATE_KVM_VCPU(r13) 103371: 1034 ld r6, VCPU_SRR0(r4) 1035 ld r7, VCPU_SRR1(r4) 1036 mtspr SPRN_SRR0, r6 1037 mtspr SPRN_SRR1, r7 1038 1039fast_guest_entry_c: 1040 ld r10, VCPU_PC(r4) 1041 ld r11, VCPU_MSR(r4) 1042 /* r11 = vcpu->arch.msr & ~MSR_HV */ 1043 rldicl r11, r11, 63 - MSR_HV_LG, 1 1044 rotldi r11, r11, 1 + MSR_HV_LG 1045 ori r11, r11, MSR_ME 1046 1047 ld r6, VCPU_CTR(r4) 1048 ld r7, VCPU_XER(r4) 1049 mtctr r6 1050 mtxer r7 1051 1052/* 1053 * Required state: 1054 * R4 = vcpu 1055 * R10: value for HSRR0 1056 * R11: value for HSRR1 1057 * R13 = PACA 1058 */ 1059fast_guest_return: 1060 li r0,0 1061 stb r0,VCPU_CEDED(r4) /* cancel cede */ 1062 mtspr SPRN_HSRR0,r10 1063 mtspr SPRN_HSRR1,r11 1064 1065 /* Activate guest mode, so faults get handled by KVM */ 1066 li r9, KVM_GUEST_MODE_GUEST_HV 1067 stb r9, HSTATE_IN_GUEST(r13) 1068 1069#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1070 /* Accumulate timing */ 1071 addi r3, r4, VCPU_TB_GUEST 1072 bl kvmhv_accumulate_time 1073#endif 1074 1075 /* Enter guest */ 1076 1077BEGIN_FTR_SECTION 1078 ld r5, VCPU_CFAR(r4) 1079 mtspr SPRN_CFAR, r5 1080END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1081BEGIN_FTR_SECTION 1082 ld r0, VCPU_PPR(r4) 1083END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1084 1085 ld r5, VCPU_LR(r4) 1086 ld r6, VCPU_CR(r4) 1087 mtlr r5 1088 mtcr r6 1089 1090 ld r1, VCPU_GPR(R1)(r4) 1091 ld r2, VCPU_GPR(R2)(r4) 1092 ld r3, VCPU_GPR(R3)(r4) 1093 ld r5, VCPU_GPR(R5)(r4) 1094 ld r6, VCPU_GPR(R6)(r4) 1095 ld r7, VCPU_GPR(R7)(r4) 1096 ld r8, VCPU_GPR(R8)(r4) 1097 ld r9, VCPU_GPR(R9)(r4) 1098 ld r10, VCPU_GPR(R10)(r4) 1099 ld r11, VCPU_GPR(R11)(r4) 1100 ld r12, VCPU_GPR(R12)(r4) 1101 ld r13, VCPU_GPR(R13)(r4) 1102 1103BEGIN_FTR_SECTION 1104 mtspr SPRN_PPR, r0 1105END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1106 1107/* Move canary into DSISR to check for later */ 1108BEGIN_FTR_SECTION 1109 li r0, 0x7fff 1110 mtspr SPRN_HDSISR, r0 1111END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1112 1113 ld r0, VCPU_GPR(R0)(r4) 1114 ld r4, VCPU_GPR(R4)(r4) 1115 HRFI_TO_GUEST 1116 b . 1117 1118/* 1119 * Enter the guest on a P9 or later system where we have exactly 1120 * one vcpu per vcore and we don't need to go to real mode 1121 * (which implies that host and guest are both using radix MMU mode). 1122 * r3 = vcpu pointer 1123 * Most SPRs and all the VSRs have been loaded already. 1124 */ 1125_GLOBAL(__kvmhv_vcpu_entry_p9) 1126EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) 1127 mflr r0 1128 std r0, PPC_LR_STKOFF(r1) 1129 stdu r1, -SFS(r1) 1130 1131 li r0, 1 1132 stw r0, STACK_SLOT_SHORT_PATH(r1) 1133 1134 std r3, HSTATE_KVM_VCPU(r13) 1135 mfcr r4 1136 stw r4, SFS+8(r1) 1137 1138 std r1, HSTATE_HOST_R1(r13) 1139 1140 reg = 14 1141 .rept 18 1142 std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) 1143 reg = reg + 1 1144 .endr 1145 1146 reg = 14 1147 .rept 18 1148 ld reg, __VCPU_GPR(reg)(r3) 1149 reg = reg + 1 1150 .endr 1151 1152 mfmsr r10 1153 std r10, HSTATE_HOST_MSR(r13) 1154 1155 mr r4, r3 1156 b fast_guest_entry_c 1157guest_exit_short_path: 1158 1159 li r0, KVM_GUEST_MODE_NONE 1160 stb r0, HSTATE_IN_GUEST(r13) 1161 1162 reg = 14 1163 .rept 18 1164 std reg, __VCPU_GPR(reg)(r9) 1165 reg = reg + 1 1166 .endr 1167 1168 reg = 14 1169 .rept 18 1170 ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) 1171 reg = reg + 1 1172 .endr 1173 1174 lwz r4, SFS+8(r1) 1175 mtcr r4 1176 1177 mr r3, r12 /* trap number */ 1178 1179 addi r1, r1, SFS 1180 ld r0, PPC_LR_STKOFF(r1) 1181 mtlr r0 1182 1183 /* If we are in real mode, do a rfid to get back to the caller */ 1184 mfmsr r4 1185 andi. r5, r4, MSR_IR 1186 bnelr 1187 rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */ 1188 mtspr SPRN_SRR0, r0 1189 ld r10, HSTATE_HOST_MSR(r13) 1190 rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG 1191 mtspr SPRN_SRR1, r10 1192 RFI_TO_KERNEL 1193 b . 1194 1195secondary_too_late: 1196 li r12, 0 1197 stw r12, STACK_SLOT_TRAP(r1) 1198 cmpdi r4, 0 1199 beq 11f 1200 stw r12, VCPU_TRAP(r4) 1201#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1202 addi r3, r4, VCPU_TB_RMEXIT 1203 bl kvmhv_accumulate_time 1204#endif 120511: b kvmhv_switch_to_host 1206 1207no_switch_exit: 1208 HMT_MEDIUM 1209 li r12, 0 1210 b 12f 1211hdec_soon: 1212 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 121312: stw r12, VCPU_TRAP(r4) 1214 mr r9, r4 1215#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1216 addi r3, r4, VCPU_TB_RMEXIT 1217 bl kvmhv_accumulate_time 1218#endif 1219 b guest_bypass 1220 1221/****************************************************************************** 1222 * * 1223 * Exit code * 1224 * * 1225 *****************************************************************************/ 1226 1227/* 1228 * We come here from the first-level interrupt handlers. 1229 */ 1230 .globl kvmppc_interrupt_hv 1231kvmppc_interrupt_hv: 1232 /* 1233 * Register contents: 1234 * R12 = (guest CR << 32) | interrupt vector 1235 * R13 = PACA 1236 * guest R12 saved in shadow VCPU SCRATCH0 1237 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE 1238 * guest R13 saved in SPRN_SCRATCH0 1239 */ 1240 std r9, HSTATE_SCRATCH2(r13) 1241 lbz r9, HSTATE_IN_GUEST(r13) 1242 cmpwi r9, KVM_GUEST_MODE_HOST_HV 1243 beq kvmppc_bad_host_intr 1244#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1245 cmpwi r9, KVM_GUEST_MODE_GUEST 1246 ld r9, HSTATE_SCRATCH2(r13) 1247 beq kvmppc_interrupt_pr 1248#endif 1249 /* We're now back in the host but in guest MMU context */ 1250 li r9, KVM_GUEST_MODE_HOST_HV 1251 stb r9, HSTATE_IN_GUEST(r13) 1252 1253 ld r9, HSTATE_KVM_VCPU(r13) 1254 1255 /* Save registers */ 1256 1257 std r0, VCPU_GPR(R0)(r9) 1258 std r1, VCPU_GPR(R1)(r9) 1259 std r2, VCPU_GPR(R2)(r9) 1260 std r3, VCPU_GPR(R3)(r9) 1261 std r4, VCPU_GPR(R4)(r9) 1262 std r5, VCPU_GPR(R5)(r9) 1263 std r6, VCPU_GPR(R6)(r9) 1264 std r7, VCPU_GPR(R7)(r9) 1265 std r8, VCPU_GPR(R8)(r9) 1266 ld r0, HSTATE_SCRATCH2(r13) 1267 std r0, VCPU_GPR(R9)(r9) 1268 std r10, VCPU_GPR(R10)(r9) 1269 std r11, VCPU_GPR(R11)(r9) 1270 ld r3, HSTATE_SCRATCH0(r13) 1271 std r3, VCPU_GPR(R12)(r9) 1272 /* CR is in the high half of r12 */ 1273 srdi r4, r12, 32 1274 std r4, VCPU_CR(r9) 1275BEGIN_FTR_SECTION 1276 ld r3, HSTATE_CFAR(r13) 1277 std r3, VCPU_CFAR(r9) 1278END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1279BEGIN_FTR_SECTION 1280 ld r4, HSTATE_PPR(r13) 1281 std r4, VCPU_PPR(r9) 1282END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1283 1284 /* Restore R1/R2 so we can handle faults */ 1285 ld r1, HSTATE_HOST_R1(r13) 1286 ld r2, PACATOC(r13) 1287 1288 mfspr r10, SPRN_SRR0 1289 mfspr r11, SPRN_SRR1 1290 std r10, VCPU_SRR0(r9) 1291 std r11, VCPU_SRR1(r9) 1292 /* trap is in the low half of r12, clear CR from the high half */ 1293 clrldi r12, r12, 32 1294 andi. r0, r12, 2 /* need to read HSRR0/1? */ 1295 beq 1f 1296 mfspr r10, SPRN_HSRR0 1297 mfspr r11, SPRN_HSRR1 1298 clrrdi r12, r12, 2 12991: std r10, VCPU_PC(r9) 1300 std r11, VCPU_MSR(r9) 1301 1302 GET_SCRATCH0(r3) 1303 mflr r4 1304 std r3, VCPU_GPR(R13)(r9) 1305 std r4, VCPU_LR(r9) 1306 1307 stw r12,VCPU_TRAP(r9) 1308 1309 /* 1310 * Now that we have saved away SRR0/1 and HSRR0/1, 1311 * interrupts are recoverable in principle, so set MSR_RI. 1312 * This becomes important for relocation-on interrupts from 1313 * the guest, which we can get in radix mode on POWER9. 1314 */ 1315 li r0, MSR_RI 1316 mtmsrd r0, 1 1317 1318#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1319 addi r3, r9, VCPU_TB_RMINTR 1320 mr r4, r9 1321 bl kvmhv_accumulate_time 1322 ld r5, VCPU_GPR(R5)(r9) 1323 ld r6, VCPU_GPR(R6)(r9) 1324 ld r7, VCPU_GPR(R7)(r9) 1325 ld r8, VCPU_GPR(R8)(r9) 1326#endif 1327 1328 /* Save HEIR (HV emulation assist reg) in emul_inst 1329 if this is an HEI (HV emulation interrupt, e40) */ 1330 li r3,KVM_INST_FETCH_FAILED 1331 stw r3,VCPU_LAST_INST(r9) 1332 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1333 bne 11f 1334 mfspr r3,SPRN_HEIR 133511: stw r3,VCPU_HEIR(r9) 1336 1337 /* these are volatile across C function calls */ 1338#ifdef CONFIG_RELOCATABLE 1339 ld r3, HSTATE_SCRATCH1(r13) 1340 mtctr r3 1341#else 1342 mfctr r3 1343#endif 1344 mfxer r4 1345 std r3, VCPU_CTR(r9) 1346 std r4, VCPU_XER(r9) 1347 1348 /* Save more register state */ 1349 mfdar r3 1350 mfdsisr r4 1351 std r3, VCPU_DAR(r9) 1352 stw r4, VCPU_DSISR(r9) 1353 1354 /* If this is a page table miss then see if it's theirs or ours */ 1355 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1356 beq kvmppc_hdsi 1357 std r3, VCPU_FAULT_DAR(r9) 1358 stw r4, VCPU_FAULT_DSISR(r9) 1359 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1360 beq kvmppc_hisi 1361 1362#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1363 /* For softpatch interrupt, go off and do TM instruction emulation */ 1364 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 1365 beq kvmppc_tm_emul 1366#endif 1367 1368 /* See if this is a leftover HDEC interrupt */ 1369 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1370 bne 2f 1371 mfspr r3,SPRN_HDEC 1372 EXTEND_HDEC(r3) 1373 cmpdi r3,0 1374 mr r4,r9 1375 bge fast_guest_return 13762: 1377 /* See if this is an hcall we can handle in real mode */ 1378 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1379 beq hcall_try_real_mode 1380 1381 /* Hypervisor doorbell - exit only if host IPI flag set */ 1382 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL 1383 bne 3f 1384BEGIN_FTR_SECTION 1385 PPC_MSGSYNC 1386 lwsync 1387 /* always exit if we're running a nested guest */ 1388 ld r0, VCPU_NESTED(r9) 1389 cmpdi r0, 0 1390 bne guest_exit_cont 1391END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1392 lbz r0, HSTATE_HOST_IPI(r13) 1393 cmpwi r0, 0 1394 beq maybe_reenter_guest 1395 b guest_exit_cont 13963: 1397 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ 1398 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL 1399 bne 14f 1400 mfspr r3, SPRN_HFSCR 1401 std r3, VCPU_HFSCR(r9) 1402 b guest_exit_cont 140314: 1404 /* External interrupt ? */ 1405 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1406 beq kvmppc_guest_external 1407 /* See if it is a machine check */ 1408 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1409 beq machine_check_realmode 1410 /* Or a hypervisor maintenance interrupt */ 1411 cmpwi r12, BOOK3S_INTERRUPT_HMI 1412 beq hmi_realmode 1413 1414guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1415 1416#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1417 addi r3, r9, VCPU_TB_RMEXIT 1418 mr r4, r9 1419 bl kvmhv_accumulate_time 1420#endif 1421#ifdef CONFIG_KVM_XICS 1422 /* We are exiting, pull the VP from the XIVE */ 1423 lbz r0, VCPU_XIVE_PUSHED(r9) 1424 cmpwi cr0, r0, 0 1425 beq 1f 1426 li r7, TM_SPC_PULL_OS_CTX 1427 li r6, TM_QW1_OS 1428 mfmsr r0 1429 andi. r0, r0, MSR_DR /* in real mode? */ 1430 beq 2f 1431 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 1432 cmpldi cr0, r10, 0 1433 beq 1f 1434 /* First load to pull the context, we ignore the value */ 1435 eieio 1436 lwzx r11, r7, r10 1437 /* Second load to recover the context state (Words 0 and 1) */ 1438 ldx r11, r6, r10 1439 b 3f 14402: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 1441 cmpldi cr0, r10, 0 1442 beq 1f 1443 /* First load to pull the context, we ignore the value */ 1444 eieio 1445 lwzcix r11, r7, r10 1446 /* Second load to recover the context state (Words 0 and 1) */ 1447 ldcix r11, r6, r10 14483: std r11, VCPU_XIVE_SAVED_STATE(r9) 1449 /* Fixup some of the state for the next load */ 1450 li r10, 0 1451 li r0, 0xff 1452 stb r10, VCPU_XIVE_PUSHED(r9) 1453 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1454 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1455 eieio 14561: 1457#endif /* CONFIG_KVM_XICS */ 1458 1459 /* If we came in through the P9 short path, go back out to C now */ 1460 lwz r0, STACK_SLOT_SHORT_PATH(r1) 1461 cmpwi r0, 0 1462 bne guest_exit_short_path 1463 1464 /* For hash guest, read the guest SLB and save it away */ 1465 ld r5, VCPU_KVM(r9) 1466 lbz r0, KVM_RADIX(r5) 1467 li r5, 0 1468 cmpwi r0, 0 1469 bne 3f /* for radix, save 0 entries */ 1470 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1471 mtctr r0 1472 li r6,0 1473 addi r7,r9,VCPU_SLB 14741: slbmfee r8,r6 1475 andis. r0,r8,SLB_ESID_V@h 1476 beq 2f 1477 add r8,r8,r6 /* put index in */ 1478 slbmfev r3,r6 1479 std r8,VCPU_SLB_E(r7) 1480 std r3,VCPU_SLB_V(r7) 1481 addi r7,r7,VCPU_SLB_SIZE 1482 addi r5,r5,1 14832: addi r6,r6,1 1484 bdnz 1b 1485 /* Finally clear out the SLB */ 1486 li r0,0 1487 slbmte r0,r0 1488 slbia 1489 ptesync 14903: stw r5,VCPU_SLB_MAX(r9) 1491 1492 /* load host SLB entries */ 1493BEGIN_MMU_FTR_SECTION 1494 b 0f 1495END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 1496 ld r8,PACA_SLBSHADOWPTR(r13) 1497 1498 .rept SLB_NUM_BOLTED 1499 li r3, SLBSHADOW_SAVEAREA 1500 LDX_BE r5, r8, r3 1501 addi r3, r3, 8 1502 LDX_BE r6, r8, r3 1503 andis. r7,r5,SLB_ESID_V@h 1504 beq 1f 1505 slbmte r6,r5 15061: addi r8,r8,16 1507 .endr 15080: 1509 1510guest_bypass: 1511 stw r12, STACK_SLOT_TRAP(r1) 1512 1513 /* Save DEC */ 1514 /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1515 ld r3, HSTATE_KVM_VCORE(r13) 1516 mfspr r5,SPRN_DEC 1517 mftb r6 1518 /* On P9, if the guest has large decr enabled, don't sign extend */ 1519BEGIN_FTR_SECTION 1520 ld r4, VCORE_LPCR(r3) 1521 andis. r4, r4, LPCR_LD@h 1522 bne 16f 1523END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1524 extsw r5,r5 152516: add r5,r5,r6 1526 /* r5 is a guest timebase value here, convert to host TB */ 1527 ld r4,VCORE_TB_OFFSET_APPL(r3) 1528 subf r5,r4,r5 1529 std r5,VCPU_DEC_EXPIRES(r9) 1530 1531 /* Increment exit count, poke other threads to exit */ 1532 mr r3, r12 1533 bl kvmhv_commence_exit 1534 nop 1535 ld r9, HSTATE_KVM_VCPU(r13) 1536 1537 /* Stop others sending VCPU interrupts to this physical CPU */ 1538 li r0, -1 1539 stw r0, VCPU_CPU(r9) 1540 stw r0, VCPU_THREAD_CPU(r9) 1541 1542 /* Save guest CTRL register, set runlatch to 1 */ 1543 mfspr r6,SPRN_CTRLF 1544 stw r6,VCPU_CTRL(r9) 1545 andi. r0,r6,1 1546 bne 4f 1547 ori r6,r6,1 1548 mtspr SPRN_CTRLT,r6 15494: 1550 /* 1551 * Save the guest PURR/SPURR 1552 */ 1553 mfspr r5,SPRN_PURR 1554 mfspr r6,SPRN_SPURR 1555 ld r7,VCPU_PURR(r9) 1556 ld r8,VCPU_SPURR(r9) 1557 std r5,VCPU_PURR(r9) 1558 std r6,VCPU_SPURR(r9) 1559 subf r5,r7,r5 1560 subf r6,r8,r6 1561 1562 /* 1563 * Restore host PURR/SPURR and add guest times 1564 * so that the time in the guest gets accounted. 1565 */ 1566 ld r3,HSTATE_PURR(r13) 1567 ld r4,HSTATE_SPURR(r13) 1568 add r3,r3,r5 1569 add r4,r4,r6 1570 mtspr SPRN_PURR,r3 1571 mtspr SPRN_SPURR,r4 1572 1573BEGIN_FTR_SECTION 1574 b 8f 1575END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1576 /* Save POWER8-specific registers */ 1577 mfspr r5, SPRN_IAMR 1578 mfspr r6, SPRN_PSPB 1579 mfspr r7, SPRN_FSCR 1580 std r5, VCPU_IAMR(r9) 1581 stw r6, VCPU_PSPB(r9) 1582 std r7, VCPU_FSCR(r9) 1583 mfspr r5, SPRN_IC 1584 mfspr r7, SPRN_TAR 1585 std r5, VCPU_IC(r9) 1586 std r7, VCPU_TAR(r9) 1587 mfspr r8, SPRN_EBBHR 1588 std r8, VCPU_EBBHR(r9) 1589 mfspr r5, SPRN_EBBRR 1590 mfspr r6, SPRN_BESCR 1591 mfspr r7, SPRN_PID 1592 mfspr r8, SPRN_WORT 1593 std r5, VCPU_EBBRR(r9) 1594 std r6, VCPU_BESCR(r9) 1595 stw r7, VCPU_GUEST_PID(r9) 1596 std r8, VCPU_WORT(r9) 1597BEGIN_FTR_SECTION 1598 mfspr r5, SPRN_TCSCR 1599 mfspr r6, SPRN_ACOP 1600 mfspr r7, SPRN_CSIGR 1601 mfspr r8, SPRN_TACR 1602 std r5, VCPU_TCSCR(r9) 1603 std r6, VCPU_ACOP(r9) 1604 std r7, VCPU_CSIGR(r9) 1605 std r8, VCPU_TACR(r9) 1606FTR_SECTION_ELSE 1607 mfspr r5, SPRN_TIDR 1608 mfspr r6, SPRN_PSSCR 1609 std r5, VCPU_TID(r9) 1610 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */ 1611 rotldi r6, r6, 60 1612 std r6, VCPU_PSSCR(r9) 1613 /* Restore host HFSCR value */ 1614 ld r7, STACK_SLOT_HFSCR(r1) 1615 mtspr SPRN_HFSCR, r7 1616ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 1617 /* 1618 * Restore various registers to 0, where non-zero values 1619 * set by the guest could disrupt the host. 1620 */ 1621 li r0, 0 1622 mtspr SPRN_PSPB, r0 1623 mtspr SPRN_WORT, r0 1624BEGIN_FTR_SECTION 1625 mtspr SPRN_TCSCR, r0 1626 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1627 li r0, 1 1628 sldi r0, r0, 31 1629 mtspr SPRN_MMCRS, r0 1630END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 1631 1632 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ 1633 ld r8, STACK_SLOT_IAMR(r1) 1634 mtspr SPRN_IAMR, r8 1635 16368: /* Power7 jumps back in here */ 1637 mfspr r5,SPRN_AMR 1638 mfspr r6,SPRN_UAMOR 1639 std r5,VCPU_AMR(r9) 1640 std r6,VCPU_UAMOR(r9) 1641 ld r5,STACK_SLOT_AMR(r1) 1642 ld r6,STACK_SLOT_UAMOR(r1) 1643 mtspr SPRN_AMR, r5 1644 mtspr SPRN_UAMOR, r6 1645 1646 /* Switch DSCR back to host value */ 1647 mfspr r8, SPRN_DSCR 1648 ld r7, HSTATE_DSCR(r13) 1649 std r8, VCPU_DSCR(r9) 1650 mtspr SPRN_DSCR, r7 1651 1652 /* Save non-volatile GPRs */ 1653 std r14, VCPU_GPR(R14)(r9) 1654 std r15, VCPU_GPR(R15)(r9) 1655 std r16, VCPU_GPR(R16)(r9) 1656 std r17, VCPU_GPR(R17)(r9) 1657 std r18, VCPU_GPR(R18)(r9) 1658 std r19, VCPU_GPR(R19)(r9) 1659 std r20, VCPU_GPR(R20)(r9) 1660 std r21, VCPU_GPR(R21)(r9) 1661 std r22, VCPU_GPR(R22)(r9) 1662 std r23, VCPU_GPR(R23)(r9) 1663 std r24, VCPU_GPR(R24)(r9) 1664 std r25, VCPU_GPR(R25)(r9) 1665 std r26, VCPU_GPR(R26)(r9) 1666 std r27, VCPU_GPR(R27)(r9) 1667 std r28, VCPU_GPR(R28)(r9) 1668 std r29, VCPU_GPR(R29)(r9) 1669 std r30, VCPU_GPR(R30)(r9) 1670 std r31, VCPU_GPR(R31)(r9) 1671 1672 /* Save SPRGs */ 1673 mfspr r3, SPRN_SPRG0 1674 mfspr r4, SPRN_SPRG1 1675 mfspr r5, SPRN_SPRG2 1676 mfspr r6, SPRN_SPRG3 1677 std r3, VCPU_SPRG0(r9) 1678 std r4, VCPU_SPRG1(r9) 1679 std r5, VCPU_SPRG2(r9) 1680 std r6, VCPU_SPRG3(r9) 1681 1682 /* save FP state */ 1683 mr r3, r9 1684 bl kvmppc_save_fp 1685 1686#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1687/* 1688 * Branch around the call if both CPU_FTR_TM and 1689 * CPU_FTR_P9_TM_HV_ASSIST are off. 1690 */ 1691BEGIN_FTR_SECTION 1692 b 91f 1693END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 1694 /* 1695 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 1696 */ 1697 mr r3, r9 1698 ld r4, VCPU_MSR(r3) 1699 li r5, 0 /* don't preserve non-vol regs */ 1700 bl kvmppc_save_tm_hv 1701 nop 1702 ld r9, HSTATE_KVM_VCPU(r13) 170391: 1704#endif 1705 1706 /* Increment yield count if they have a VPA */ 1707 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1708 cmpdi r8, 0 1709 beq 25f 1710 li r4, LPPACA_YIELDCOUNT 1711 LWZX_BE r3, r8, r4 1712 addi r3, r3, 1 1713 STWX_BE r3, r8, r4 1714 li r3, 1 1715 stb r3, VCPU_VPA_DIRTY(r9) 171625: 1717 /* Save PMU registers if requested */ 1718 /* r8 and cr0.eq are live here */ 1719 mr r3, r9 1720 li r4, 1 1721 beq 21f /* if no VPA, save PMU stuff anyway */ 1722 lbz r4, LPPACA_PMCINUSE(r8) 172321: bl kvmhv_save_guest_pmu 1724 ld r9, HSTATE_KVM_VCPU(r13) 1725 1726 /* Restore host values of some registers */ 1727BEGIN_FTR_SECTION 1728 ld r5, STACK_SLOT_CIABR(r1) 1729 ld r6, STACK_SLOT_DAWR(r1) 1730 ld r7, STACK_SLOT_DAWRX(r1) 1731 mtspr SPRN_CIABR, r5 1732 /* 1733 * If the DAWR doesn't work, it's ok to write these here as 1734 * this value should always be zero 1735 */ 1736 mtspr SPRN_DAWR, r6 1737 mtspr SPRN_DAWRX, r7 1738END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1739BEGIN_FTR_SECTION 1740 ld r5, STACK_SLOT_TID(r1) 1741 ld r6, STACK_SLOT_PSSCR(r1) 1742 ld r7, STACK_SLOT_PID(r1) 1743 mtspr SPRN_TIDR, r5 1744 mtspr SPRN_PSSCR, r6 1745 mtspr SPRN_PID, r7 1746END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1747 1748#ifdef CONFIG_PPC_RADIX_MMU 1749 /* 1750 * Are we running hash or radix ? 1751 */ 1752 ld r5, VCPU_KVM(r9) 1753 lbz r0, KVM_RADIX(r5) 1754 cmpwi cr2, r0, 0 1755 beq cr2, 2f 1756 1757 /* 1758 * Radix: do eieio; tlbsync; ptesync sequence in case we 1759 * interrupted the guest between a tlbie and a ptesync. 1760 */ 1761 eieio 1762 tlbsync 1763 ptesync 1764 1765 /* Radix: Handle the case where the guest used an illegal PID */ 1766 LOAD_REG_ADDR(r4, mmu_base_pid) 1767 lwz r3, VCPU_GUEST_PID(r9) 1768 lwz r5, 0(r4) 1769 cmpw cr0,r3,r5 1770 blt 2f 1771 1772 /* 1773 * Illegal PID, the HW might have prefetched and cached in the TLB 1774 * some translations for the LPID 0 / guest PID combination which 1775 * Linux doesn't know about, so we need to flush that PID out of 1776 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to 1777 * the right context. 1778 */ 1779 li r0,0 1780 mtspr SPRN_LPID,r0 1781 isync 1782 1783 /* Then do a congruence class local flush */ 1784 ld r6,VCPU_KVM(r9) 1785 lwz r0,KVM_TLB_SETS(r6) 1786 mtctr r0 1787 li r7,0x400 /* IS field = 0b01 */ 1788 ptesync 1789 sldi r0,r3,32 /* RS has PID */ 17901: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ 1791 addi r7,r7,0x1000 1792 bdnz 1b 1793 ptesync 1794 17952: 1796#endif /* CONFIG_PPC_RADIX_MMU */ 1797 1798 /* 1799 * POWER7/POWER8 guest -> host partition switch code. 1800 * We don't have to lock against tlbies but we do 1801 * have to coordinate the hardware threads. 1802 * Here STACK_SLOT_TRAP(r1) contains the trap number. 1803 */ 1804kvmhv_switch_to_host: 1805 /* Secondary threads wait for primary to do partition switch */ 1806 ld r5,HSTATE_KVM_VCORE(r13) 1807 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1808 lbz r3,HSTATE_PTID(r13) 1809 cmpwi r3,0 1810 beq 15f 1811 HMT_LOW 181213: lbz r3,VCORE_IN_GUEST(r5) 1813 cmpwi r3,0 1814 bne 13b 1815 HMT_MEDIUM 1816 b 16f 1817 1818 /* Primary thread waits for all the secondaries to exit guest */ 181915: lwz r3,VCORE_ENTRY_EXIT(r5) 1820 rlwinm r0,r3,32-8,0xff 1821 clrldi r3,r3,56 1822 cmpw r3,r0 1823 bne 15b 1824 isync 1825 1826 /* Did we actually switch to the guest at all? */ 1827 lbz r6, VCORE_IN_GUEST(r5) 1828 cmpwi r6, 0 1829 beq 19f 1830 1831 /* Primary thread switches back to host partition */ 1832 lwz r7,KVM_HOST_LPID(r4) 1833BEGIN_FTR_SECTION 1834 ld r6,KVM_HOST_SDR1(r4) 1835 li r8,LPID_RSVD /* switch to reserved LPID */ 1836 mtspr SPRN_LPID,r8 1837 ptesync 1838 mtspr SPRN_SDR1,r6 /* switch to host page table */ 1839END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 1840 mtspr SPRN_LPID,r7 1841 isync 1842 1843BEGIN_FTR_SECTION 1844 /* DPDES and VTB are shared between threads */ 1845 mfspr r7, SPRN_DPDES 1846 mfspr r8, SPRN_VTB 1847 std r7, VCORE_DPDES(r5) 1848 std r8, VCORE_VTB(r5) 1849 /* clear DPDES so we don't get guest doorbells in the host */ 1850 li r8, 0 1851 mtspr SPRN_DPDES, r8 1852END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1853 1854 /* Subtract timebase offset from timebase */ 1855 ld r8, VCORE_TB_OFFSET_APPL(r5) 1856 cmpdi r8,0 1857 beq 17f 1858 li r0, 0 1859 std r0, VCORE_TB_OFFSET_APPL(r5) 1860 mftb r6 /* current guest timebase */ 1861 subf r8,r8,r6 1862 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 1863 mftb r7 /* check if lower 24 bits overflowed */ 1864 clrldi r6,r6,40 1865 clrldi r7,r7,40 1866 cmpld r7,r6 1867 bge 17f 1868 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 1869 mtspr SPRN_TBU40,r8 1870 187117: 1872 /* 1873 * If this is an HMI, we called kvmppc_realmode_hmi_handler 1874 * above, which may or may not have already called 1875 * kvmppc_subcore_exit_guest. Fortunately, all that 1876 * kvmppc_subcore_exit_guest does is clear a flag, so calling 1877 * it again here is benign even if kvmppc_realmode_hmi_handler 1878 * has already called it. 1879 */ 1880 bl kvmppc_subcore_exit_guest 1881 nop 188230: ld r5,HSTATE_KVM_VCORE(r13) 1883 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1884 1885 /* Reset PCR */ 1886 ld r0, VCORE_PCR(r5) 1887 cmpdi r0, 0 1888 beq 18f 1889 li r0, 0 1890 mtspr SPRN_PCR, r0 189118: 1892 /* Signal secondary CPUs to continue */ 1893 stb r0,VCORE_IN_GUEST(r5) 189419: lis r8,0x7fff /* MAX_INT@h */ 1895 mtspr SPRN_HDEC,r8 1896 189716: 1898BEGIN_FTR_SECTION 1899 /* On POWER9 with HPT-on-radix we need to wait for all other threads */ 1900 ld r3, HSTATE_SPLIT_MODE(r13) 1901 cmpdi r3, 0 1902 beq 47f 1903 lwz r8, KVM_SPLIT_DO_RESTORE(r3) 1904 cmpwi r8, 0 1905 beq 47f 1906 bl kvmhv_p9_restore_lpcr 1907 nop 1908 b 48f 190947: 1910END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1911 ld r8,KVM_HOST_LPCR(r4) 1912 mtspr SPRN_LPCR,r8 1913 isync 191448: 1915#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1916 /* Finish timing, if we have a vcpu */ 1917 ld r4, HSTATE_KVM_VCPU(r13) 1918 cmpdi r4, 0 1919 li r3, 0 1920 beq 2f 1921 bl kvmhv_accumulate_time 19222: 1923#endif 1924 /* Unset guest mode */ 1925 li r0, KVM_GUEST_MODE_NONE 1926 stb r0, HSTATE_IN_GUEST(r13) 1927 1928 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ 1929 ld r0, SFS+PPC_LR_STKOFF(r1) 1930 addi r1, r1, SFS 1931 mtlr r0 1932 blr 1933 1934kvmppc_guest_external: 1935 /* External interrupt, first check for host_ipi. If this is 1936 * set, we know the host wants us out so let's do it now 1937 */ 1938 bl kvmppc_read_intr 1939 1940 /* 1941 * Restore the active volatile registers after returning from 1942 * a C function. 1943 */ 1944 ld r9, HSTATE_KVM_VCPU(r13) 1945 li r12, BOOK3S_INTERRUPT_EXTERNAL 1946 1947 /* 1948 * kvmppc_read_intr return codes: 1949 * 1950 * Exit to host (r3 > 0) 1951 * 1 An interrupt is pending that needs to be handled by the host 1952 * Exit guest and return to host by branching to guest_exit_cont 1953 * 1954 * 2 Passthrough that needs completion in the host 1955 * Exit guest and return to host by branching to guest_exit_cont 1956 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD 1957 * to indicate to the host to complete handling the interrupt 1958 * 1959 * Before returning to guest, we check if any CPU is heading out 1960 * to the host and if so, we head out also. If no CPUs are heading 1961 * check return values <= 0. 1962 * 1963 * Return to guest (r3 <= 0) 1964 * 0 No external interrupt is pending 1965 * -1 A guest wakeup IPI (which has now been cleared) 1966 * In either case, we return to guest to deliver any pending 1967 * guest interrupts. 1968 * 1969 * -2 A PCI passthrough external interrupt was handled 1970 * (interrupt was delivered directly to guest) 1971 * Return to guest to deliver any pending guest interrupts. 1972 */ 1973 1974 cmpdi r3, 1 1975 ble 1f 1976 1977 /* Return code = 2 */ 1978 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1979 stw r12, VCPU_TRAP(r9) 1980 b guest_exit_cont 1981 19821: /* Return code <= 1 */ 1983 cmpdi r3, 0 1984 bgt guest_exit_cont 1985 1986 /* Return code <= 0 */ 1987maybe_reenter_guest: 1988 ld r5, HSTATE_KVM_VCORE(r13) 1989 lwz r0, VCORE_ENTRY_EXIT(r5) 1990 cmpwi r0, 0x100 1991 mr r4, r9 1992 blt deliver_guest_interrupt 1993 b guest_exit_cont 1994 1995#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1996/* 1997 * Softpatch interrupt for transactional memory emulation cases 1998 * on POWER9 DD2.2. This is early in the guest exit path - we 1999 * haven't saved registers or done a treclaim yet. 2000 */ 2001kvmppc_tm_emul: 2002 /* Save instruction image in HEIR */ 2003 mfspr r3, SPRN_HEIR 2004 stw r3, VCPU_HEIR(r9) 2005 2006 /* 2007 * The cases we want to handle here are those where the guest 2008 * is in real suspend mode and is trying to transition to 2009 * transactional mode. 2010 */ 2011 lbz r0, HSTATE_FAKE_SUSPEND(r13) 2012 cmpwi r0, 0 /* keep exiting guest if in fake suspend */ 2013 bne guest_exit_cont 2014 rldicl r3, r11, 64 - MSR_TS_S_LG, 62 2015 cmpwi r3, 1 /* or if not in suspend state */ 2016 bne guest_exit_cont 2017 2018 /* Call C code to do the emulation */ 2019 mr r3, r9 2020 bl kvmhv_p9_tm_emulation_early 2021 nop 2022 ld r9, HSTATE_KVM_VCPU(r13) 2023 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 2024 cmpwi r3, 0 2025 beq guest_exit_cont /* continue exiting if not handled */ 2026 ld r10, VCPU_PC(r9) 2027 ld r11, VCPU_MSR(r9) 2028 b fast_interrupt_c_return /* go back to guest if handled */ 2029#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 2030 2031/* 2032 * Check whether an HDSI is an HPTE not found fault or something else. 2033 * If it is an HPTE not found fault that is due to the guest accessing 2034 * a page that they have mapped but which we have paged out, then 2035 * we continue on with the guest exit path. In all other cases, 2036 * reflect the HDSI to the guest as a DSI. 2037 */ 2038kvmppc_hdsi: 2039 ld r3, VCPU_KVM(r9) 2040 lbz r0, KVM_RADIX(r3) 2041 mfspr r4, SPRN_HDAR 2042 mfspr r6, SPRN_HDSISR 2043BEGIN_FTR_SECTION 2044 /* Look for DSISR canary. If we find it, retry instruction */ 2045 cmpdi r6, 0x7fff 2046 beq 6f 2047END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2048 cmpwi r0, 0 2049 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ 2050 /* HPTE not found fault or protection fault? */ 2051 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 2052 beq 1f /* if not, send it to the guest */ 2053 andi. r0, r11, MSR_DR /* data relocation enabled? */ 2054 beq 3f 2055BEGIN_FTR_SECTION 2056 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ 2057 b 4f 2058END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2059 clrrdi r0, r4, 28 2060 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 2061 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT 2062 bne 7f /* if no SLB entry found */ 20634: std r4, VCPU_FAULT_DAR(r9) 2064 stw r6, VCPU_FAULT_DSISR(r9) 2065 2066 /* Search the hash table. */ 2067 mr r3, r9 /* vcpu pointer */ 2068 li r7, 1 /* data fault */ 2069 bl kvmppc_hpte_hv_fault 2070 ld r9, HSTATE_KVM_VCPU(r13) 2071 ld r10, VCPU_PC(r9) 2072 ld r11, VCPU_MSR(r9) 2073 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 2074 cmpdi r3, 0 /* retry the instruction */ 2075 beq 6f 2076 cmpdi r3, -1 /* handle in kernel mode */ 2077 beq guest_exit_cont 2078 cmpdi r3, -2 /* MMIO emulation; need instr word */ 2079 beq 2f 2080 2081 /* Synthesize a DSI (or DSegI) for the guest */ 2082 ld r4, VCPU_FAULT_DAR(r9) 2083 mr r6, r3 20841: li r0, BOOK3S_INTERRUPT_DATA_STORAGE 2085 mtspr SPRN_DSISR, r6 20867: mtspr SPRN_DAR, r4 2087 mtspr SPRN_SRR0, r10 2088 mtspr SPRN_SRR1, r11 2089 mr r10, r0 2090 bl kvmppc_msr_interrupt 2091fast_interrupt_c_return: 20926: ld r7, VCPU_CTR(r9) 2093 ld r8, VCPU_XER(r9) 2094 mtctr r7 2095 mtxer r8 2096 mr r4, r9 2097 b fast_guest_return 2098 20993: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ 2100 ld r5, KVM_VRMA_SLB_V(r5) 2101 b 4b 2102 2103 /* If this is for emulated MMIO, load the instruction word */ 21042: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ 2105 2106 /* Set guest mode to 'jump over instruction' so if lwz faults 2107 * we'll just continue at the next IP. */ 2108 li r0, KVM_GUEST_MODE_SKIP 2109 stb r0, HSTATE_IN_GUEST(r13) 2110 2111 /* Do the access with MSR:DR enabled */ 2112 mfmsr r3 2113 ori r4, r3, MSR_DR /* Enable paging for data */ 2114 mtmsrd r4 2115 lwz r8, 0(r10) 2116 mtmsrd r3 2117 2118 /* Store the result */ 2119 stw r8, VCPU_LAST_INST(r9) 2120 2121 /* Unset guest mode. */ 2122 li r0, KVM_GUEST_MODE_HOST_HV 2123 stb r0, HSTATE_IN_GUEST(r13) 2124 b guest_exit_cont 2125 2126.Lradix_hdsi: 2127 std r4, VCPU_FAULT_DAR(r9) 2128 stw r6, VCPU_FAULT_DSISR(r9) 2129.Lradix_hisi: 2130 mfspr r5, SPRN_ASDR 2131 std r5, VCPU_FAULT_GPA(r9) 2132 b guest_exit_cont 2133 2134/* 2135 * Similarly for an HISI, reflect it to the guest as an ISI unless 2136 * it is an HPTE not found fault for a page that we have paged out. 2137 */ 2138kvmppc_hisi: 2139 ld r3, VCPU_KVM(r9) 2140 lbz r0, KVM_RADIX(r3) 2141 cmpwi r0, 0 2142 bne .Lradix_hisi /* for radix, just save ASDR */ 2143 andis. r0, r11, SRR1_ISI_NOPT@h 2144 beq 1f 2145 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 2146 beq 3f 2147BEGIN_FTR_SECTION 2148 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ 2149 b 4f 2150END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2151 clrrdi r0, r10, 28 2152 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 2153 li r0, BOOK3S_INTERRUPT_INST_SEGMENT 2154 bne 7f /* if no SLB entry found */ 21554: 2156 /* Search the hash table. */ 2157 mr r3, r9 /* vcpu pointer */ 2158 mr r4, r10 2159 mr r6, r11 2160 li r7, 0 /* instruction fault */ 2161 bl kvmppc_hpte_hv_fault 2162 ld r9, HSTATE_KVM_VCPU(r13) 2163 ld r10, VCPU_PC(r9) 2164 ld r11, VCPU_MSR(r9) 2165 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE 2166 cmpdi r3, 0 /* retry the instruction */ 2167 beq fast_interrupt_c_return 2168 cmpdi r3, -1 /* handle in kernel mode */ 2169 beq guest_exit_cont 2170 2171 /* Synthesize an ISI (or ISegI) for the guest */ 2172 mr r11, r3 21731: li r0, BOOK3S_INTERRUPT_INST_STORAGE 21747: mtspr SPRN_SRR0, r10 2175 mtspr SPRN_SRR1, r11 2176 mr r10, r0 2177 bl kvmppc_msr_interrupt 2178 b fast_interrupt_c_return 2179 21803: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 2181 ld r5, KVM_VRMA_SLB_V(r6) 2182 b 4b 2183 2184/* 2185 * Try to handle an hcall in real mode. 2186 * Returns to the guest if we handle it, or continues on up to 2187 * the kernel if we can't (i.e. if we don't have a handler for 2188 * it, or if the handler returns H_TOO_HARD). 2189 * 2190 * r5 - r8 contain hcall args, 2191 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca 2192 */ 2193hcall_try_real_mode: 2194 ld r3,VCPU_GPR(R3)(r9) 2195 andi. r0,r11,MSR_PR 2196 /* sc 1 from userspace - reflect to guest syscall */ 2197 bne sc_1_fast_return 2198 /* sc 1 from nested guest - give it to L1 to handle */ 2199 ld r0, VCPU_NESTED(r9) 2200 cmpdi r0, 0 2201 bne guest_exit_cont 2202 clrrdi r3,r3,2 2203 cmpldi r3,hcall_real_table_end - hcall_real_table 2204 bge guest_exit_cont 2205 /* See if this hcall is enabled for in-kernel handling */ 2206 ld r4, VCPU_KVM(r9) 2207 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ 2208 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ 2209 add r4, r4, r0 2210 ld r0, KVM_ENABLED_HCALLS(r4) 2211 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ 2212 srd r0, r0, r4 2213 andi. r0, r0, 1 2214 beq guest_exit_cont 2215 /* Get pointer to handler, if any, and call it */ 2216 LOAD_REG_ADDR(r4, hcall_real_table) 2217 lwax r3,r3,r4 2218 cmpwi r3,0 2219 beq guest_exit_cont 2220 add r12,r3,r4 2221 mtctr r12 2222 mr r3,r9 /* get vcpu pointer */ 2223 ld r4,VCPU_GPR(R4)(r9) 2224 bctrl 2225 cmpdi r3,H_TOO_HARD 2226 beq hcall_real_fallback 2227 ld r4,HSTATE_KVM_VCPU(r13) 2228 std r3,VCPU_GPR(R3)(r4) 2229 ld r10,VCPU_PC(r4) 2230 ld r11,VCPU_MSR(r4) 2231 b fast_guest_return 2232 2233sc_1_fast_return: 2234 mtspr SPRN_SRR0,r10 2235 mtspr SPRN_SRR1,r11 2236 li r10, BOOK3S_INTERRUPT_SYSCALL 2237 bl kvmppc_msr_interrupt 2238 mr r4,r9 2239 b fast_guest_return 2240 2241 /* We've attempted a real mode hcall, but it's punted it back 2242 * to userspace. We need to restore some clobbered volatiles 2243 * before resuming the pass-it-to-qemu path */ 2244hcall_real_fallback: 2245 li r12,BOOK3S_INTERRUPT_SYSCALL 2246 ld r9, HSTATE_KVM_VCPU(r13) 2247 2248 b guest_exit_cont 2249 2250 .globl hcall_real_table 2251hcall_real_table: 2252 .long 0 /* 0 - unused */ 2253 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 2254 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 2255 .long DOTSYM(kvmppc_h_read) - hcall_real_table 2256 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 2257 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 2258 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 2259#ifdef CONFIG_SPAPR_TCE_IOMMU 2260 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 2261 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table 2262#else 2263 .long 0 /* 0x1c */ 2264 .long 0 /* 0x20 */ 2265#endif 2266 .long 0 /* 0x24 - H_SET_SPRG0 */ 2267 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 2268 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 2269 .long 0 /* 0x30 */ 2270 .long 0 /* 0x34 */ 2271 .long 0 /* 0x38 */ 2272 .long 0 /* 0x3c */ 2273 .long 0 /* 0x40 */ 2274 .long 0 /* 0x44 */ 2275 .long 0 /* 0x48 */ 2276 .long 0 /* 0x4c */ 2277 .long 0 /* 0x50 */ 2278 .long 0 /* 0x54 */ 2279 .long 0 /* 0x58 */ 2280 .long 0 /* 0x5c */ 2281 .long 0 /* 0x60 */ 2282#ifdef CONFIG_KVM_XICS 2283 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 2284 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 2285 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 2286 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table 2287 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 2288#else 2289 .long 0 /* 0x64 - H_EOI */ 2290 .long 0 /* 0x68 - H_CPPR */ 2291 .long 0 /* 0x6c - H_IPI */ 2292 .long 0 /* 0x70 - H_IPOLL */ 2293 .long 0 /* 0x74 - H_XIRR */ 2294#endif 2295 .long 0 /* 0x78 */ 2296 .long 0 /* 0x7c */ 2297 .long 0 /* 0x80 */ 2298 .long 0 /* 0x84 */ 2299 .long 0 /* 0x88 */ 2300 .long 0 /* 0x8c */ 2301 .long 0 /* 0x90 */ 2302 .long 0 /* 0x94 */ 2303 .long 0 /* 0x98 */ 2304 .long 0 /* 0x9c */ 2305 .long 0 /* 0xa0 */ 2306 .long 0 /* 0xa4 */ 2307 .long 0 /* 0xa8 */ 2308 .long 0 /* 0xac */ 2309 .long 0 /* 0xb0 */ 2310 .long 0 /* 0xb4 */ 2311 .long 0 /* 0xb8 */ 2312 .long 0 /* 0xbc */ 2313 .long 0 /* 0xc0 */ 2314 .long 0 /* 0xc4 */ 2315 .long 0 /* 0xc8 */ 2316 .long 0 /* 0xcc */ 2317 .long 0 /* 0xd0 */ 2318 .long 0 /* 0xd4 */ 2319 .long 0 /* 0xd8 */ 2320 .long 0 /* 0xdc */ 2321 .long DOTSYM(kvmppc_h_cede) - hcall_real_table 2322 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 2323 .long 0 /* 0xe8 */ 2324 .long 0 /* 0xec */ 2325 .long 0 /* 0xf0 */ 2326 .long 0 /* 0xf4 */ 2327 .long 0 /* 0xf8 */ 2328 .long 0 /* 0xfc */ 2329 .long 0 /* 0x100 */ 2330 .long 0 /* 0x104 */ 2331 .long 0 /* 0x108 */ 2332 .long 0 /* 0x10c */ 2333 .long 0 /* 0x110 */ 2334 .long 0 /* 0x114 */ 2335 .long 0 /* 0x118 */ 2336 .long 0 /* 0x11c */ 2337 .long 0 /* 0x120 */ 2338 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 2339 .long 0 /* 0x128 */ 2340 .long 0 /* 0x12c */ 2341 .long 0 /* 0x130 */ 2342 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2343#ifdef CONFIG_SPAPR_TCE_IOMMU 2344 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table 2345 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table 2346#else 2347 .long 0 /* 0x138 */ 2348 .long 0 /* 0x13c */ 2349#endif 2350 .long 0 /* 0x140 */ 2351 .long 0 /* 0x144 */ 2352 .long 0 /* 0x148 */ 2353 .long 0 /* 0x14c */ 2354 .long 0 /* 0x150 */ 2355 .long 0 /* 0x154 */ 2356 .long 0 /* 0x158 */ 2357 .long 0 /* 0x15c */ 2358 .long 0 /* 0x160 */ 2359 .long 0 /* 0x164 */ 2360 .long 0 /* 0x168 */ 2361 .long 0 /* 0x16c */ 2362 .long 0 /* 0x170 */ 2363 .long 0 /* 0x174 */ 2364 .long 0 /* 0x178 */ 2365 .long 0 /* 0x17c */ 2366 .long 0 /* 0x180 */ 2367 .long 0 /* 0x184 */ 2368 .long 0 /* 0x188 */ 2369 .long 0 /* 0x18c */ 2370 .long 0 /* 0x190 */ 2371 .long 0 /* 0x194 */ 2372 .long 0 /* 0x198 */ 2373 .long 0 /* 0x19c */ 2374 .long 0 /* 0x1a0 */ 2375 .long 0 /* 0x1a4 */ 2376 .long 0 /* 0x1a8 */ 2377 .long 0 /* 0x1ac */ 2378 .long 0 /* 0x1b0 */ 2379 .long 0 /* 0x1b4 */ 2380 .long 0 /* 0x1b8 */ 2381 .long 0 /* 0x1bc */ 2382 .long 0 /* 0x1c0 */ 2383 .long 0 /* 0x1c4 */ 2384 .long 0 /* 0x1c8 */ 2385 .long 0 /* 0x1cc */ 2386 .long 0 /* 0x1d0 */ 2387 .long 0 /* 0x1d4 */ 2388 .long 0 /* 0x1d8 */ 2389 .long 0 /* 0x1dc */ 2390 .long 0 /* 0x1e0 */ 2391 .long 0 /* 0x1e4 */ 2392 .long 0 /* 0x1e8 */ 2393 .long 0 /* 0x1ec */ 2394 .long 0 /* 0x1f0 */ 2395 .long 0 /* 0x1f4 */ 2396 .long 0 /* 0x1f8 */ 2397 .long 0 /* 0x1fc */ 2398 .long 0 /* 0x200 */ 2399 .long 0 /* 0x204 */ 2400 .long 0 /* 0x208 */ 2401 .long 0 /* 0x20c */ 2402 .long 0 /* 0x210 */ 2403 .long 0 /* 0x214 */ 2404 .long 0 /* 0x218 */ 2405 .long 0 /* 0x21c */ 2406 .long 0 /* 0x220 */ 2407 .long 0 /* 0x224 */ 2408 .long 0 /* 0x228 */ 2409 .long 0 /* 0x22c */ 2410 .long 0 /* 0x230 */ 2411 .long 0 /* 0x234 */ 2412 .long 0 /* 0x238 */ 2413 .long 0 /* 0x23c */ 2414 .long 0 /* 0x240 */ 2415 .long 0 /* 0x244 */ 2416 .long 0 /* 0x248 */ 2417 .long 0 /* 0x24c */ 2418 .long 0 /* 0x250 */ 2419 .long 0 /* 0x254 */ 2420 .long 0 /* 0x258 */ 2421 .long 0 /* 0x25c */ 2422 .long 0 /* 0x260 */ 2423 .long 0 /* 0x264 */ 2424 .long 0 /* 0x268 */ 2425 .long 0 /* 0x26c */ 2426 .long 0 /* 0x270 */ 2427 .long 0 /* 0x274 */ 2428 .long 0 /* 0x278 */ 2429 .long 0 /* 0x27c */ 2430 .long 0 /* 0x280 */ 2431 .long 0 /* 0x284 */ 2432 .long 0 /* 0x288 */ 2433 .long 0 /* 0x28c */ 2434 .long 0 /* 0x290 */ 2435 .long 0 /* 0x294 */ 2436 .long 0 /* 0x298 */ 2437 .long 0 /* 0x29c */ 2438 .long 0 /* 0x2a0 */ 2439 .long 0 /* 0x2a4 */ 2440 .long 0 /* 0x2a8 */ 2441 .long 0 /* 0x2ac */ 2442 .long 0 /* 0x2b0 */ 2443 .long 0 /* 0x2b4 */ 2444 .long 0 /* 0x2b8 */ 2445 .long 0 /* 0x2bc */ 2446 .long 0 /* 0x2c0 */ 2447 .long 0 /* 0x2c4 */ 2448 .long 0 /* 0x2c8 */ 2449 .long 0 /* 0x2cc */ 2450 .long 0 /* 0x2d0 */ 2451 .long 0 /* 0x2d4 */ 2452 .long 0 /* 0x2d8 */ 2453 .long 0 /* 0x2dc */ 2454 .long 0 /* 0x2e0 */ 2455 .long 0 /* 0x2e4 */ 2456 .long 0 /* 0x2e8 */ 2457 .long 0 /* 0x2ec */ 2458 .long 0 /* 0x2f0 */ 2459 .long 0 /* 0x2f4 */ 2460 .long 0 /* 0x2f8 */ 2461#ifdef CONFIG_KVM_XICS 2462 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table 2463#else 2464 .long 0 /* 0x2fc - H_XIRR_X*/ 2465#endif 2466 .long DOTSYM(kvmppc_h_random) - hcall_real_table 2467 .globl hcall_real_table_end 2468hcall_real_table_end: 2469 2470_GLOBAL(kvmppc_h_set_xdabr) 2471EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 2472 andi. r0, r5, DABRX_USER | DABRX_KERNEL 2473 beq 6f 2474 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 2475 andc. r0, r5, r0 2476 beq 3f 24776: li r3, H_PARAMETER 2478 blr 2479 2480_GLOBAL(kvmppc_h_set_dabr) 2481EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) 2482 li r5, DABRX_USER | DABRX_KERNEL 24833: 2484BEGIN_FTR_SECTION 2485 b 2f 2486END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2487 std r4,VCPU_DABR(r3) 2488 stw r5, VCPU_DABRX(r3) 2489 mtspr SPRN_DABRX, r5 2490 /* Work around P7 bug where DABR can get corrupted on mtspr */ 24911: mtspr SPRN_DABR,r4 2492 mfspr r5, SPRN_DABR 2493 cmpd r4, r5 2494 bne 1b 2495 isync 2496 li r3,0 2497 blr 2498 24992: 2500 LOAD_REG_ADDR(r11, dawr_force_enable) 2501 lbz r11, 0(r11) 2502 cmpdi r11, 0 2503 bne 3f 2504 li r3, H_HARDWARE 2505 blr 25063: 2507 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 2508 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW 2509 rlwimi r5, r4, 2, DAWRX_WT 2510 clrrdi r4, r4, 3 2511 std r4, VCPU_DAWR(r3) 2512 std r5, VCPU_DAWRX(r3) 2513 /* 2514 * If came in through the real mode hcall handler then it is necessary 2515 * to write the registers since the return path won't. Otherwise it is 2516 * sufficient to store then in the vcpu struct as they will be loaded 2517 * next time the vcpu is run. 2518 */ 2519 mfmsr r6 2520 andi. r6, r6, MSR_DR /* in real mode? */ 2521 bne 4f 2522 mtspr SPRN_DAWR, r4 2523 mtspr SPRN_DAWRX, r5 25244: li r3, 0 2525 blr 2526 2527_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ 2528 ori r11,r11,MSR_EE 2529 std r11,VCPU_MSR(r3) 2530 li r0,1 2531 stb r0,VCPU_CEDED(r3) 2532 sync /* order setting ceded vs. testing prodded */ 2533 lbz r5,VCPU_PRODDED(r3) 2534 cmpwi r5,0 2535 bne kvm_cede_prodded 2536 li r12,0 /* set trap to 0 to say hcall is handled */ 2537 stw r12,VCPU_TRAP(r3) 2538 li r0,H_SUCCESS 2539 std r0,VCPU_GPR(R3)(r3) 2540 2541 /* 2542 * Set our bit in the bitmask of napping threads unless all the 2543 * other threads are already napping, in which case we send this 2544 * up to the host. 2545 */ 2546 ld r5,HSTATE_KVM_VCORE(r13) 2547 lbz r6,HSTATE_PTID(r13) 2548 lwz r8,VCORE_ENTRY_EXIT(r5) 2549 clrldi r8,r8,56 2550 li r0,1 2551 sld r0,r0,r6 2552 addi r6,r5,VCORE_NAPPING_THREADS 255331: lwarx r4,0,r6 2554 or r4,r4,r0 2555 cmpw r4,r8 2556 beq kvm_cede_exit 2557 stwcx. r4,0,r6 2558 bne 31b 2559 /* order napping_threads update vs testing entry_exit_map */ 2560 isync 2561 li r0,NAPPING_CEDE 2562 stb r0,HSTATE_NAPPING(r13) 2563 lwz r7,VCORE_ENTRY_EXIT(r5) 2564 cmpwi r7,0x100 2565 bge 33f /* another thread already exiting */ 2566 2567/* 2568 * Although not specifically required by the architecture, POWER7 2569 * preserves the following registers in nap mode, even if an SMT mode 2570 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, 2571 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. 2572 */ 2573 /* Save non-volatile GPRs */ 2574 std r14, VCPU_GPR(R14)(r3) 2575 std r15, VCPU_GPR(R15)(r3) 2576 std r16, VCPU_GPR(R16)(r3) 2577 std r17, VCPU_GPR(R17)(r3) 2578 std r18, VCPU_GPR(R18)(r3) 2579 std r19, VCPU_GPR(R19)(r3) 2580 std r20, VCPU_GPR(R20)(r3) 2581 std r21, VCPU_GPR(R21)(r3) 2582 std r22, VCPU_GPR(R22)(r3) 2583 std r23, VCPU_GPR(R23)(r3) 2584 std r24, VCPU_GPR(R24)(r3) 2585 std r25, VCPU_GPR(R25)(r3) 2586 std r26, VCPU_GPR(R26)(r3) 2587 std r27, VCPU_GPR(R27)(r3) 2588 std r28, VCPU_GPR(R28)(r3) 2589 std r29, VCPU_GPR(R29)(r3) 2590 std r30, VCPU_GPR(R30)(r3) 2591 std r31, VCPU_GPR(R31)(r3) 2592 2593 /* save FP state */ 2594 bl kvmppc_save_fp 2595 2596#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2597/* 2598 * Branch around the call if both CPU_FTR_TM and 2599 * CPU_FTR_P9_TM_HV_ASSIST are off. 2600 */ 2601BEGIN_FTR_SECTION 2602 b 91f 2603END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 2604 /* 2605 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2606 */ 2607 ld r3, HSTATE_KVM_VCPU(r13) 2608 ld r4, VCPU_MSR(r3) 2609 li r5, 0 /* don't preserve non-vol regs */ 2610 bl kvmppc_save_tm_hv 2611 nop 261291: 2613#endif 2614 2615 /* 2616 * Set DEC to the smaller of DEC and HDEC, so that we wake 2617 * no later than the end of our timeslice (HDEC interrupts 2618 * don't wake us from nap). 2619 */ 2620 mfspr r3, SPRN_DEC 2621 mfspr r4, SPRN_HDEC 2622 mftb r5 2623BEGIN_FTR_SECTION 2624 /* On P9 check whether the guest has large decrementer mode enabled */ 2625 ld r6, HSTATE_KVM_VCORE(r13) 2626 ld r6, VCORE_LPCR(r6) 2627 andis. r6, r6, LPCR_LD@h 2628 bne 68f 2629END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2630 extsw r3, r3 263168: EXTEND_HDEC(r4) 2632 cmpd r3, r4 2633 ble 67f 2634 mtspr SPRN_DEC, r4 263567: 2636 /* save expiry time of guest decrementer */ 2637 add r3, r3, r5 2638 ld r4, HSTATE_KVM_VCPU(r13) 2639 ld r5, HSTATE_KVM_VCORE(r13) 2640 ld r6, VCORE_TB_OFFSET_APPL(r5) 2641 subf r3, r6, r3 /* convert to host TB value */ 2642 std r3, VCPU_DEC_EXPIRES(r4) 2643 2644#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2645 ld r4, HSTATE_KVM_VCPU(r13) 2646 addi r3, r4, VCPU_TB_CEDE 2647 bl kvmhv_accumulate_time 2648#endif 2649 2650 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ 2651 2652 /* Go back to host stack */ 2653 ld r1, HSTATE_HOST_R1(r13) 2654 2655 /* 2656 * Take a nap until a decrementer or external or doobell interrupt 2657 * occurs, with PECE1 and PECE0 set in LPCR. 2658 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. 2659 * Also clear the runlatch bit before napping. 2660 */ 2661kvm_do_nap: 2662 mfspr r0, SPRN_CTRLF 2663 clrrdi r0, r0, 1 2664 mtspr SPRN_CTRLT, r0 2665 2666 li r0,1 2667 stb r0,HSTATE_HWTHREAD_REQ(r13) 2668 mfspr r5,SPRN_LPCR 2669 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 2670BEGIN_FTR_SECTION 2671 ori r5, r5, LPCR_PECEDH 2672 rlwimi r5, r3, 0, LPCR_PECEDP 2673END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2674 2675kvm_nap_sequence: /* desired LPCR value in r5 */ 2676BEGIN_FTR_SECTION 2677 /* 2678 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) 2679 * enable state loss = 1 (allow SMT mode switch) 2680 * requested level = 0 (just stop dispatching) 2681 */ 2682 lis r3, (PSSCR_EC | PSSCR_ESL)@h 2683 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ 2684 li r4, LPCR_PECE_HVEE@higher 2685 sldi r4, r4, 32 2686 or r5, r5, r4 2687FTR_SECTION_ELSE 2688 li r3, PNV_THREAD_NAP 2689ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2690 mtspr SPRN_LPCR,r5 2691 isync 2692 2693BEGIN_FTR_SECTION 2694 bl isa300_idle_stop_mayloss 2695FTR_SECTION_ELSE 2696 bl isa206_idle_insn_mayloss 2697ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2698 2699 mfspr r0, SPRN_CTRLF 2700 ori r0, r0, 1 2701 mtspr SPRN_CTRLT, r0 2702 2703 mtspr SPRN_SRR1, r3 2704 2705 li r0, 0 2706 stb r0, PACA_FTRACE_ENABLED(r13) 2707 2708 li r0, KVM_HWTHREAD_IN_KVM 2709 stb r0, HSTATE_HWTHREAD_STATE(r13) 2710 2711 lbz r0, HSTATE_NAPPING(r13) 2712 cmpwi r0, NAPPING_CEDE 2713 beq kvm_end_cede 2714 cmpwi r0, NAPPING_NOVCPU 2715 beq kvm_novcpu_wakeup 2716 cmpwi r0, NAPPING_UNSPLIT 2717 beq kvm_unsplit_wakeup 2718 twi 31,0,0 /* Nap state must not be zero */ 2719 272033: mr r4, r3 2721 li r3, 0 2722 li r12, 0 2723 b 34f 2724 2725kvm_end_cede: 2726 /* Woken by external or decrementer interrupt */ 2727 2728 /* get vcpu pointer */ 2729 ld r4, HSTATE_KVM_VCPU(r13) 2730 2731#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2732 addi r3, r4, VCPU_TB_RMINTR 2733 bl kvmhv_accumulate_time 2734#endif 2735 2736#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2737/* 2738 * Branch around the call if both CPU_FTR_TM and 2739 * CPU_FTR_P9_TM_HV_ASSIST are off. 2740 */ 2741BEGIN_FTR_SECTION 2742 b 91f 2743END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 2744 /* 2745 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2746 */ 2747 mr r3, r4 2748 ld r4, VCPU_MSR(r3) 2749 li r5, 0 /* don't preserve non-vol regs */ 2750 bl kvmppc_restore_tm_hv 2751 nop 2752 ld r4, HSTATE_KVM_VCPU(r13) 275391: 2754#endif 2755 2756 /* load up FP state */ 2757 bl kvmppc_load_fp 2758 2759 /* Restore guest decrementer */ 2760 ld r3, VCPU_DEC_EXPIRES(r4) 2761 ld r5, HSTATE_KVM_VCORE(r13) 2762 ld r6, VCORE_TB_OFFSET_APPL(r5) 2763 add r3, r3, r6 /* convert host TB to guest TB value */ 2764 mftb r7 2765 subf r3, r7, r3 2766 mtspr SPRN_DEC, r3 2767 2768 /* Load NV GPRS */ 2769 ld r14, VCPU_GPR(R14)(r4) 2770 ld r15, VCPU_GPR(R15)(r4) 2771 ld r16, VCPU_GPR(R16)(r4) 2772 ld r17, VCPU_GPR(R17)(r4) 2773 ld r18, VCPU_GPR(R18)(r4) 2774 ld r19, VCPU_GPR(R19)(r4) 2775 ld r20, VCPU_GPR(R20)(r4) 2776 ld r21, VCPU_GPR(R21)(r4) 2777 ld r22, VCPU_GPR(R22)(r4) 2778 ld r23, VCPU_GPR(R23)(r4) 2779 ld r24, VCPU_GPR(R24)(r4) 2780 ld r25, VCPU_GPR(R25)(r4) 2781 ld r26, VCPU_GPR(R26)(r4) 2782 ld r27, VCPU_GPR(R27)(r4) 2783 ld r28, VCPU_GPR(R28)(r4) 2784 ld r29, VCPU_GPR(R29)(r4) 2785 ld r30, VCPU_GPR(R30)(r4) 2786 ld r31, VCPU_GPR(R31)(r4) 2787 2788 /* Check the wake reason in SRR1 to see why we got here */ 2789 bl kvmppc_check_wake_reason 2790 2791 /* 2792 * Restore volatile registers since we could have called a 2793 * C routine in kvmppc_check_wake_reason 2794 * r4 = VCPU 2795 * r3 tells us whether we need to return to host or not 2796 * WARNING: it gets checked further down: 2797 * should not modify r3 until this check is done. 2798 */ 2799 ld r4, HSTATE_KVM_VCPU(r13) 2800 2801 /* clear our bit in vcore->napping_threads */ 280234: ld r5,HSTATE_KVM_VCORE(r13) 2803 lbz r7,HSTATE_PTID(r13) 2804 li r0,1 2805 sld r0,r0,r7 2806 addi r6,r5,VCORE_NAPPING_THREADS 280732: lwarx r7,0,r6 2808 andc r7,r7,r0 2809 stwcx. r7,0,r6 2810 bne 32b 2811 li r0,0 2812 stb r0,HSTATE_NAPPING(r13) 2813 2814 /* See if the wake reason saved in r3 means we need to exit */ 2815 stw r12, VCPU_TRAP(r4) 2816 mr r9, r4 2817 cmpdi r3, 0 2818 bgt guest_exit_cont 2819 b maybe_reenter_guest 2820 2821 /* cede when already previously prodded case */ 2822kvm_cede_prodded: 2823 li r0,0 2824 stb r0,VCPU_PRODDED(r3) 2825 sync /* order testing prodded vs. clearing ceded */ 2826 stb r0,VCPU_CEDED(r3) 2827 li r3,H_SUCCESS 2828 blr 2829 2830 /* we've ceded but we want to give control to the host */ 2831kvm_cede_exit: 2832 ld r9, HSTATE_KVM_VCPU(r13) 2833#ifdef CONFIG_KVM_XICS 2834 /* Abort if we still have a pending escalation */ 2835 lbz r5, VCPU_XIVE_ESC_ON(r9) 2836 cmpwi r5, 0 2837 beq 1f 2838 li r0, 0 2839 stb r0, VCPU_CEDED(r9) 28401: /* Enable XIVE escalation */ 2841 li r5, XIVE_ESB_SET_PQ_00 2842 mfmsr r0 2843 andi. r0, r0, MSR_DR /* in real mode? */ 2844 beq 1f 2845 ld r10, VCPU_XIVE_ESC_VADDR(r9) 2846 cmpdi r10, 0 2847 beq 3f 2848 ldx r0, r10, r5 2849 b 2f 28501: ld r10, VCPU_XIVE_ESC_RADDR(r9) 2851 cmpdi r10, 0 2852 beq 3f 2853 ldcix r0, r10, r5 28542: sync 2855 li r0, 1 2856 stb r0, VCPU_XIVE_ESC_ON(r9) 2857#endif /* CONFIG_KVM_XICS */ 28583: b guest_exit_cont 2859 2860 /* Try to do machine check recovery in real mode */ 2861machine_check_realmode: 2862 mr r3, r9 /* get vcpu pointer */ 2863 bl kvmppc_realmode_machine_check 2864 nop 2865 /* all machine checks go to virtual mode for further handling */ 2866 ld r9, HSTATE_KVM_VCPU(r13) 2867 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2868 b guest_exit_cont 2869 2870/* 2871 * Call C code to handle a HMI in real mode. 2872 * Only the primary thread does the call, secondary threads are handled 2873 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 2874 * r9 points to the vcpu on entry 2875 */ 2876hmi_realmode: 2877 lbz r0, HSTATE_PTID(r13) 2878 cmpwi r0, 0 2879 bne guest_exit_cont 2880 bl kvmppc_realmode_hmi_handler 2881 ld r9, HSTATE_KVM_VCPU(r13) 2882 li r12, BOOK3S_INTERRUPT_HMI 2883 b guest_exit_cont 2884 2885/* 2886 * Check the reason we woke from nap, and take appropriate action. 2887 * Returns (in r3): 2888 * 0 if nothing needs to be done 2889 * 1 if something happened that needs to be handled by the host 2890 * -1 if there was a guest wakeup (IPI or msgsnd) 2891 * -2 if we handled a PCI passthrough interrupt (returned by 2892 * kvmppc_read_intr only) 2893 * 2894 * Also sets r12 to the interrupt vector for any interrupt that needs 2895 * to be handled now by the host (0x500 for external interrupt), or zero. 2896 * Modifies all volatile registers (since it may call a C function). 2897 * This routine calls kvmppc_read_intr, a C function, if an external 2898 * interrupt is pending. 2899 */ 2900kvmppc_check_wake_reason: 2901 mfspr r6, SPRN_SRR1 2902BEGIN_FTR_SECTION 2903 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2904FTR_SECTION_ELSE 2905 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2906ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2907 cmpwi r6, 8 /* was it an external interrupt? */ 2908 beq 7f /* if so, see what it was */ 2909 li r3, 0 2910 li r12, 0 2911 cmpwi r6, 6 /* was it the decrementer? */ 2912 beq 0f 2913BEGIN_FTR_SECTION 2914 cmpwi r6, 5 /* privileged doorbell? */ 2915 beq 0f 2916 cmpwi r6, 3 /* hypervisor doorbell? */ 2917 beq 3f 2918END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2919 cmpwi r6, 0xa /* Hypervisor maintenance ? */ 2920 beq 4f 2921 li r3, 1 /* anything else, return 1 */ 29220: blr 2923 2924 /* hypervisor doorbell */ 29253: li r12, BOOK3S_INTERRUPT_H_DOORBELL 2926 2927 /* 2928 * Clear the doorbell as we will invoke the handler 2929 * explicitly in the guest exit path. 2930 */ 2931 lis r6, (PPC_DBELL_SERVER << (63-36))@h 2932 PPC_MSGCLR(6) 2933 /* see if it's a host IPI */ 2934 li r3, 1 2935BEGIN_FTR_SECTION 2936 PPC_MSGSYNC 2937 lwsync 2938END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2939 lbz r0, HSTATE_HOST_IPI(r13) 2940 cmpwi r0, 0 2941 bnelr 2942 /* if not, return -1 */ 2943 li r3, -1 2944 blr 2945 2946 /* Woken up due to Hypervisor maintenance interrupt */ 29474: li r12, BOOK3S_INTERRUPT_HMI 2948 li r3, 1 2949 blr 2950 2951 /* external interrupt - create a stack frame so we can call C */ 29527: mflr r0 2953 std r0, PPC_LR_STKOFF(r1) 2954 stdu r1, -PPC_MIN_STKFRM(r1) 2955 bl kvmppc_read_intr 2956 nop 2957 li r12, BOOK3S_INTERRUPT_EXTERNAL 2958 cmpdi r3, 1 2959 ble 1f 2960 2961 /* 2962 * Return code of 2 means PCI passthrough interrupt, but 2963 * we need to return back to host to complete handling the 2964 * interrupt. Trap reason is expected in r12 by guest 2965 * exit code. 2966 */ 2967 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 29681: 2969 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 2970 addi r1, r1, PPC_MIN_STKFRM 2971 mtlr r0 2972 blr 2973 2974/* 2975 * Save away FP, VMX and VSX registers. 2976 * r3 = vcpu pointer 2977 * N.B. r30 and r31 are volatile across this function, 2978 * thus it is not callable from C. 2979 */ 2980kvmppc_save_fp: 2981 mflr r30 2982 mr r31,r3 2983 mfmsr r5 2984 ori r8,r5,MSR_FP 2985#ifdef CONFIG_ALTIVEC 2986BEGIN_FTR_SECTION 2987 oris r8,r8,MSR_VEC@h 2988END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2989#endif 2990#ifdef CONFIG_VSX 2991BEGIN_FTR_SECTION 2992 oris r8,r8,MSR_VSX@h 2993END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2994#endif 2995 mtmsrd r8 2996 addi r3,r3,VCPU_FPRS 2997 bl store_fp_state 2998#ifdef CONFIG_ALTIVEC 2999BEGIN_FTR_SECTION 3000 addi r3,r31,VCPU_VRS 3001 bl store_vr_state 3002END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3003#endif 3004 mfspr r6,SPRN_VRSAVE 3005 stw r6,VCPU_VRSAVE(r31) 3006 mtlr r30 3007 blr 3008 3009/* 3010 * Load up FP, VMX and VSX registers 3011 * r4 = vcpu pointer 3012 * N.B. r30 and r31 are volatile across this function, 3013 * thus it is not callable from C. 3014 */ 3015kvmppc_load_fp: 3016 mflr r30 3017 mr r31,r4 3018 mfmsr r9 3019 ori r8,r9,MSR_FP 3020#ifdef CONFIG_ALTIVEC 3021BEGIN_FTR_SECTION 3022 oris r8,r8,MSR_VEC@h 3023END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3024#endif 3025#ifdef CONFIG_VSX 3026BEGIN_FTR_SECTION 3027 oris r8,r8,MSR_VSX@h 3028END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3029#endif 3030 mtmsrd r8 3031 addi r3,r4,VCPU_FPRS 3032 bl load_fp_state 3033#ifdef CONFIG_ALTIVEC 3034BEGIN_FTR_SECTION 3035 addi r3,r31,VCPU_VRS 3036 bl load_vr_state 3037END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3038#endif 3039 lwz r7,VCPU_VRSAVE(r31) 3040 mtspr SPRN_VRSAVE,r7 3041 mtlr r30 3042 mr r4,r31 3043 blr 3044 3045#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 3046/* 3047 * Save transactional state and TM-related registers. 3048 * Called with r3 pointing to the vcpu struct and r4 containing 3049 * the guest MSR value. 3050 * r5 is non-zero iff non-volatile register state needs to be maintained. 3051 * If r5 == 0, this can modify all checkpointed registers, but 3052 * restores r1 and r2 before exit. 3053 */ 3054_GLOBAL_TOC(kvmppc_save_tm_hv) 3055EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) 3056 /* See if we need to handle fake suspend mode */ 3057BEGIN_FTR_SECTION 3058 b __kvmppc_save_tm 3059END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3060 3061 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ 3062 cmpwi r0, 0 3063 beq __kvmppc_save_tm 3064 3065 /* The following code handles the fake_suspend = 1 case */ 3066 mflr r0 3067 std r0, PPC_LR_STKOFF(r1) 3068 stdu r1, -PPC_MIN_STKFRM(r1) 3069 3070 /* Turn on TM. */ 3071 mfmsr r8 3072 li r0, 1 3073 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 3074 mtmsrd r8 3075 3076 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ 3077 beq 4f 3078BEGIN_FTR_SECTION 3079 bl pnv_power9_force_smt4_catch 3080END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3081 nop 3082 3083 /* We have to treclaim here because that's the only way to do S->N */ 3084 li r3, TM_CAUSE_KVM_RESCHED 3085 TRECLAIM(R3) 3086 3087 /* 3088 * We were in fake suspend, so we are not going to save the 3089 * register state as the guest checkpointed state (since 3090 * we already have it), therefore we can now use any volatile GPR. 3091 * In fact treclaim in fake suspend state doesn't modify 3092 * any registers. 3093 */ 3094 3095BEGIN_FTR_SECTION 3096 bl pnv_power9_force_smt4_release 3097END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3098 nop 3099 31004: 3101 mfspr r3, SPRN_PSSCR 3102 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ 3103 li r0, PSSCR_FAKE_SUSPEND 3104 andc r3, r3, r0 3105 mtspr SPRN_PSSCR, r3 3106 3107 /* Don't save TEXASR, use value from last exit in real suspend state */ 3108 ld r9, HSTATE_KVM_VCPU(r13) 3109 mfspr r5, SPRN_TFHAR 3110 mfspr r6, SPRN_TFIAR 3111 std r5, VCPU_TFHAR(r9) 3112 std r6, VCPU_TFIAR(r9) 3113 3114 addi r1, r1, PPC_MIN_STKFRM 3115 ld r0, PPC_LR_STKOFF(r1) 3116 mtlr r0 3117 blr 3118 3119/* 3120 * Restore transactional state and TM-related registers. 3121 * Called with r3 pointing to the vcpu struct 3122 * and r4 containing the guest MSR value. 3123 * r5 is non-zero iff non-volatile register state needs to be maintained. 3124 * This potentially modifies all checkpointed registers. 3125 * It restores r1 and r2 from the PACA. 3126 */ 3127_GLOBAL_TOC(kvmppc_restore_tm_hv) 3128EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) 3129 /* 3130 * If we are doing TM emulation for the guest on a POWER9 DD2, 3131 * then we don't actually do a trechkpt -- we either set up 3132 * fake-suspend mode, or emulate a TM rollback. 3133 */ 3134BEGIN_FTR_SECTION 3135 b __kvmppc_restore_tm 3136END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3137 mflr r0 3138 std r0, PPC_LR_STKOFF(r1) 3139 3140 li r0, 0 3141 stb r0, HSTATE_FAKE_SUSPEND(r13) 3142 3143 /* Turn on TM so we can restore TM SPRs */ 3144 mfmsr r5 3145 li r0, 1 3146 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG 3147 mtmsrd r5 3148 3149 /* 3150 * The user may change these outside of a transaction, so they must 3151 * always be context switched. 3152 */ 3153 ld r5, VCPU_TFHAR(r3) 3154 ld r6, VCPU_TFIAR(r3) 3155 ld r7, VCPU_TEXASR(r3) 3156 mtspr SPRN_TFHAR, r5 3157 mtspr SPRN_TFIAR, r6 3158 mtspr SPRN_TEXASR, r7 3159 3160 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 3161 beqlr /* TM not active in guest */ 3162 3163 /* Make sure the failure summary is set */ 3164 oris r7, r7, (TEXASR_FS)@h 3165 mtspr SPRN_TEXASR, r7 3166 3167 cmpwi r5, 1 /* check for suspended state */ 3168 bgt 10f 3169 stb r5, HSTATE_FAKE_SUSPEND(r13) 3170 b 9f /* and return */ 317110: stdu r1, -PPC_MIN_STKFRM(r1) 3172 /* guest is in transactional state, so simulate rollback */ 3173 bl kvmhv_emulate_tm_rollback 3174 nop 3175 addi r1, r1, PPC_MIN_STKFRM 31769: ld r0, PPC_LR_STKOFF(r1) 3177 mtlr r0 3178 blr 3179#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 3180 3181/* 3182 * We come here if we get any exception or interrupt while we are 3183 * executing host real mode code while in guest MMU context. 3184 * r12 is (CR << 32) | vector 3185 * r13 points to our PACA 3186 * r12 is saved in HSTATE_SCRATCH0(r13) 3187 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE 3188 * r9 is saved in HSTATE_SCRATCH2(r13) 3189 * r13 is saved in HSPRG1 3190 * cfar is saved in HSTATE_CFAR(r13) 3191 * ppr is saved in HSTATE_PPR(r13) 3192 */ 3193kvmppc_bad_host_intr: 3194 /* 3195 * Switch to the emergency stack, but start half-way down in 3196 * case we were already on it. 3197 */ 3198 mr r9, r1 3199 std r1, PACAR1(r13) 3200 ld r1, PACAEMERGSP(r13) 3201 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE 3202 std r9, 0(r1) 3203 std r0, GPR0(r1) 3204 std r9, GPR1(r1) 3205 std r2, GPR2(r1) 3206 SAVE_4GPRS(3, r1) 3207 SAVE_2GPRS(7, r1) 3208 srdi r0, r12, 32 3209 clrldi r12, r12, 32 3210 std r0, _CCR(r1) 3211 std r12, _TRAP(r1) 3212 andi. r0, r12, 2 3213 beq 1f 3214 mfspr r3, SPRN_HSRR0 3215 mfspr r4, SPRN_HSRR1 3216 mfspr r5, SPRN_HDAR 3217 mfspr r6, SPRN_HDSISR 3218 b 2f 32191: mfspr r3, SPRN_SRR0 3220 mfspr r4, SPRN_SRR1 3221 mfspr r5, SPRN_DAR 3222 mfspr r6, SPRN_DSISR 32232: std r3, _NIP(r1) 3224 std r4, _MSR(r1) 3225 std r5, _DAR(r1) 3226 std r6, _DSISR(r1) 3227 ld r9, HSTATE_SCRATCH2(r13) 3228 ld r12, HSTATE_SCRATCH0(r13) 3229 GET_SCRATCH0(r0) 3230 SAVE_4GPRS(9, r1) 3231 std r0, GPR13(r1) 3232 SAVE_NVGPRS(r1) 3233 ld r5, HSTATE_CFAR(r13) 3234 std r5, ORIG_GPR3(r1) 3235 mflr r3 3236#ifdef CONFIG_RELOCATABLE 3237 ld r4, HSTATE_SCRATCH1(r13) 3238#else 3239 mfctr r4 3240#endif 3241 mfxer r5 3242 lbz r6, PACAIRQSOFTMASK(r13) 3243 std r3, _LINK(r1) 3244 std r4, _CTR(r1) 3245 std r5, _XER(r1) 3246 std r6, SOFTE(r1) 3247 ld r2, PACATOC(r13) 3248 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) 3249 std r3, STACK_FRAME_OVERHEAD-16(r1) 3250 3251 /* 3252 * On POWER9 do a minimal restore of the MMU and call C code, 3253 * which will print a message and panic. 3254 * XXX On POWER7 and POWER8, we just spin here since we don't 3255 * know what the other threads are doing (and we don't want to 3256 * coordinate with them) - but at least we now have register state 3257 * in memory that we might be able to look at from another CPU. 3258 */ 3259BEGIN_FTR_SECTION 3260 b . 3261END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 3262 ld r9, HSTATE_KVM_VCPU(r13) 3263 ld r10, VCPU_KVM(r9) 3264 3265 li r0, 0 3266 mtspr SPRN_AMR, r0 3267 mtspr SPRN_IAMR, r0 3268 mtspr SPRN_CIABR, r0 3269 mtspr SPRN_DAWRX, r0 3270 3271BEGIN_MMU_FTR_SECTION 3272 b 4f 3273END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 3274 3275 slbmte r0, r0 3276 slbia 3277 ptesync 3278 ld r8, PACA_SLBSHADOWPTR(r13) 3279 .rept SLB_NUM_BOLTED 3280 li r3, SLBSHADOW_SAVEAREA 3281 LDX_BE r5, r8, r3 3282 addi r3, r3, 8 3283 LDX_BE r6, r8, r3 3284 andis. r7, r5, SLB_ESID_V@h 3285 beq 3f 3286 slbmte r6, r5 32873: addi r8, r8, 16 3288 .endr 3289 32904: lwz r7, KVM_HOST_LPID(r10) 3291 mtspr SPRN_LPID, r7 3292 mtspr SPRN_PID, r0 3293 ld r8, KVM_HOST_LPCR(r10) 3294 mtspr SPRN_LPCR, r8 3295 isync 3296 li r0, KVM_GUEST_MODE_NONE 3297 stb r0, HSTATE_IN_GUEST(r13) 3298 3299 /* 3300 * Turn on the MMU and jump to C code 3301 */ 3302 bcl 20, 31, .+4 33035: mflr r3 3304 addi r3, r3, 9f - 5b 3305 li r4, -1 3306 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ 3307 ld r4, PACAKMSR(r13) 3308 mtspr SPRN_SRR0, r3 3309 mtspr SPRN_SRR1, r4 3310 RFI_TO_KERNEL 33119: addi r3, r1, STACK_FRAME_OVERHEAD 3312 bl kvmppc_bad_interrupt 3313 b 9b 3314 3315/* 3316 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 3317 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 3318 * r11 has the guest MSR value (in/out) 3319 * r9 has a vcpu pointer (in) 3320 * r0 is used as a scratch register 3321 */ 3322kvmppc_msr_interrupt: 3323 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 3324 cmpwi r0, 2 /* Check if we are in transactional state.. */ 3325 ld r11, VCPU_INTR_MSR(r9) 3326 bne 1f 3327 /* ... if transactional, change to suspended */ 3328 li r0, 1 33291: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 3330 blr 3331 3332/* 3333 * Load up guest PMU state. R3 points to the vcpu struct. 3334 */ 3335_GLOBAL(kvmhv_load_guest_pmu) 3336EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) 3337 mr r4, r3 3338 mflr r0 3339 li r3, 1 3340 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3341 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3342 isync 3343BEGIN_FTR_SECTION 3344 ld r3, VCPU_MMCR(r4) 3345 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3346 cmpwi r5, MMCR0_PMAO 3347 beql kvmppc_fix_pmao 3348END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3349 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 3350 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 3351 lwz r6, VCPU_PMC + 8(r4) 3352 lwz r7, VCPU_PMC + 12(r4) 3353 lwz r8, VCPU_PMC + 16(r4) 3354 lwz r9, VCPU_PMC + 20(r4) 3355 mtspr SPRN_PMC1, r3 3356 mtspr SPRN_PMC2, r5 3357 mtspr SPRN_PMC3, r6 3358 mtspr SPRN_PMC4, r7 3359 mtspr SPRN_PMC5, r8 3360 mtspr SPRN_PMC6, r9 3361 ld r3, VCPU_MMCR(r4) 3362 ld r5, VCPU_MMCR + 8(r4) 3363 ld r6, VCPU_MMCR + 16(r4) 3364 ld r7, VCPU_SIAR(r4) 3365 ld r8, VCPU_SDAR(r4) 3366 mtspr SPRN_MMCR1, r5 3367 mtspr SPRN_MMCRA, r6 3368 mtspr SPRN_SIAR, r7 3369 mtspr SPRN_SDAR, r8 3370BEGIN_FTR_SECTION 3371 ld r5, VCPU_MMCR + 24(r4) 3372 ld r6, VCPU_SIER(r4) 3373 mtspr SPRN_MMCR2, r5 3374 mtspr SPRN_SIER, r6 3375BEGIN_FTR_SECTION_NESTED(96) 3376 lwz r7, VCPU_PMC + 24(r4) 3377 lwz r8, VCPU_PMC + 28(r4) 3378 ld r9, VCPU_MMCR + 32(r4) 3379 mtspr SPRN_SPMC1, r7 3380 mtspr SPRN_SPMC2, r8 3381 mtspr SPRN_MMCRS, r9 3382END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3383END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3384 mtspr SPRN_MMCR0, r3 3385 isync 3386 mtlr r0 3387 blr 3388 3389/* 3390 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. 3391 */ 3392_GLOBAL(kvmhv_load_host_pmu) 3393EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) 3394 mflr r0 3395 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ 3396 cmpwi r4, 0 3397 beq 23f /* skip if not */ 3398BEGIN_FTR_SECTION 3399 ld r3, HSTATE_MMCR0(r13) 3400 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3401 cmpwi r4, MMCR0_PMAO 3402 beql kvmppc_fix_pmao 3403END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3404 lwz r3, HSTATE_PMC1(r13) 3405 lwz r4, HSTATE_PMC2(r13) 3406 lwz r5, HSTATE_PMC3(r13) 3407 lwz r6, HSTATE_PMC4(r13) 3408 lwz r8, HSTATE_PMC5(r13) 3409 lwz r9, HSTATE_PMC6(r13) 3410 mtspr SPRN_PMC1, r3 3411 mtspr SPRN_PMC2, r4 3412 mtspr SPRN_PMC3, r5 3413 mtspr SPRN_PMC4, r6 3414 mtspr SPRN_PMC5, r8 3415 mtspr SPRN_PMC6, r9 3416 ld r3, HSTATE_MMCR0(r13) 3417 ld r4, HSTATE_MMCR1(r13) 3418 ld r5, HSTATE_MMCRA(r13) 3419 ld r6, HSTATE_SIAR(r13) 3420 ld r7, HSTATE_SDAR(r13) 3421 mtspr SPRN_MMCR1, r4 3422 mtspr SPRN_MMCRA, r5 3423 mtspr SPRN_SIAR, r6 3424 mtspr SPRN_SDAR, r7 3425BEGIN_FTR_SECTION 3426 ld r8, HSTATE_MMCR2(r13) 3427 ld r9, HSTATE_SIER(r13) 3428 mtspr SPRN_MMCR2, r8 3429 mtspr SPRN_SIER, r9 3430END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3431 mtspr SPRN_MMCR0, r3 3432 isync 3433 mtlr r0 343423: blr 3435 3436/* 3437 * Save guest PMU state into the vcpu struct. 3438 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) 3439 */ 3440_GLOBAL(kvmhv_save_guest_pmu) 3441EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) 3442 mr r9, r3 3443 mr r8, r4 3444BEGIN_FTR_SECTION 3445 /* 3446 * POWER8 seems to have a hardware bug where setting 3447 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] 3448 * when some counters are already negative doesn't seem 3449 * to cause a performance monitor alert (and hence interrupt). 3450 * The effect of this is that when saving the PMU state, 3451 * if there is no PMU alert pending when we read MMCR0 3452 * before freezing the counters, but one becomes pending 3453 * before we read the counters, we lose it. 3454 * To work around this, we need a way to freeze the counters 3455 * before reading MMCR0. Normally, freezing the counters 3456 * is done by writing MMCR0 (to set MMCR0[FC]) which 3457 * unavoidably writes MMCR0[PMA0] as well. On POWER8, 3458 * we can also freeze the counters using MMCR2, by writing 3459 * 1s to all the counter freeze condition bits (there are 3460 * 9 bits each for 6 counters). 3461 */ 3462 li r3, -1 /* set all freeze bits */ 3463 clrrdi r3, r3, 10 3464 mfspr r10, SPRN_MMCR2 3465 mtspr SPRN_MMCR2, r3 3466 isync 3467END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3468 li r3, 1 3469 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3470 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 3471 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3472 mfspr r6, SPRN_MMCRA 3473 /* Clear MMCRA in order to disable SDAR updates */ 3474 li r7, 0 3475 mtspr SPRN_MMCRA, r7 3476 isync 3477 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ 3478 bne 21f 3479 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 3480 b 22f 348121: mfspr r5, SPRN_MMCR1 3482 mfspr r7, SPRN_SIAR 3483 mfspr r8, SPRN_SDAR 3484 std r4, VCPU_MMCR(r9) 3485 std r5, VCPU_MMCR + 8(r9) 3486 std r6, VCPU_MMCR + 16(r9) 3487BEGIN_FTR_SECTION 3488 std r10, VCPU_MMCR + 24(r9) 3489END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3490 std r7, VCPU_SIAR(r9) 3491 std r8, VCPU_SDAR(r9) 3492 mfspr r3, SPRN_PMC1 3493 mfspr r4, SPRN_PMC2 3494 mfspr r5, SPRN_PMC3 3495 mfspr r6, SPRN_PMC4 3496 mfspr r7, SPRN_PMC5 3497 mfspr r8, SPRN_PMC6 3498 stw r3, VCPU_PMC(r9) 3499 stw r4, VCPU_PMC + 4(r9) 3500 stw r5, VCPU_PMC + 8(r9) 3501 stw r6, VCPU_PMC + 12(r9) 3502 stw r7, VCPU_PMC + 16(r9) 3503 stw r8, VCPU_PMC + 20(r9) 3504BEGIN_FTR_SECTION 3505 mfspr r5, SPRN_SIER 3506 std r5, VCPU_SIER(r9) 3507BEGIN_FTR_SECTION_NESTED(96) 3508 mfspr r6, SPRN_SPMC1 3509 mfspr r7, SPRN_SPMC2 3510 mfspr r8, SPRN_MMCRS 3511 stw r6, VCPU_PMC + 24(r9) 3512 stw r7, VCPU_PMC + 28(r9) 3513 std r8, VCPU_MMCR + 32(r9) 3514 lis r4, 0x8000 3515 mtspr SPRN_MMCRS, r4 3516END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3517END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 351822: blr 3519 3520/* 3521 * This works around a hardware bug on POWER8E processors, where 3522 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 3523 * performance monitor interrupt. Instead, when we need to have 3524 * an interrupt pending, we have to arrange for a counter to overflow. 3525 */ 3526kvmppc_fix_pmao: 3527 li r3, 0 3528 mtspr SPRN_MMCR2, r3 3529 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h 3530 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN 3531 mtspr SPRN_MMCR0, r3 3532 lis r3, 0x7fff 3533 ori r3, r3, 0xffff 3534 mtspr SPRN_PMC6, r3 3535 isync 3536 blr 3537 3538#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 3539/* 3540 * Start timing an activity 3541 * r3 = pointer to time accumulation struct, r4 = vcpu 3542 */ 3543kvmhv_start_timing: 3544 ld r5, HSTATE_KVM_VCORE(r13) 3545 ld r6, VCORE_TB_OFFSET_APPL(r5) 3546 mftb r5 3547 subf r5, r6, r5 /* subtract current timebase offset */ 3548 std r3, VCPU_CUR_ACTIVITY(r4) 3549 std r5, VCPU_ACTIVITY_START(r4) 3550 blr 3551 3552/* 3553 * Accumulate time to one activity and start another. 3554 * r3 = pointer to new time accumulation struct, r4 = vcpu 3555 */ 3556kvmhv_accumulate_time: 3557 ld r5, HSTATE_KVM_VCORE(r13) 3558 ld r8, VCORE_TB_OFFSET_APPL(r5) 3559 ld r5, VCPU_CUR_ACTIVITY(r4) 3560 ld r6, VCPU_ACTIVITY_START(r4) 3561 std r3, VCPU_CUR_ACTIVITY(r4) 3562 mftb r7 3563 subf r7, r8, r7 /* subtract current timebase offset */ 3564 std r7, VCPU_ACTIVITY_START(r4) 3565 cmpdi r5, 0 3566 beqlr 3567 subf r3, r6, r7 3568 ld r8, TAS_SEQCOUNT(r5) 3569 cmpdi r8, 0 3570 addi r8, r8, 1 3571 std r8, TAS_SEQCOUNT(r5) 3572 lwsync 3573 ld r7, TAS_TOTAL(r5) 3574 add r7, r7, r3 3575 std r7, TAS_TOTAL(r5) 3576 ld r6, TAS_MIN(r5) 3577 ld r7, TAS_MAX(r5) 3578 beq 3f 3579 cmpd r3, r6 3580 bge 1f 35813: std r3, TAS_MIN(r5) 35821: cmpd r3, r7 3583 ble 2f 3584 std r3, TAS_MAX(r5) 35852: lwsync 3586 addi r8, r8, 1 3587 std r8, TAS_SEQCOUNT(r5) 3588 blr 3589#endif 3590