1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * 4 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 5 * 6 * Derived from book3s_rmhandlers.S and other files, which are: 7 * 8 * Copyright SUSE Linux Products GmbH 2009 9 * 10 * Authors: Alexander Graf <agraf@suse.de> 11 */ 12 13#include <asm/ppc_asm.h> 14#include <asm/kvm_asm.h> 15#include <asm/reg.h> 16#include <asm/mmu.h> 17#include <asm/page.h> 18#include <asm/ptrace.h> 19#include <asm/hvcall.h> 20#include <asm/asm-offsets.h> 21#include <asm/exception-64s.h> 22#include <asm/kvm_book3s_asm.h> 23#include <asm/book3s/64/mmu-hash.h> 24#include <asm/export.h> 25#include <asm/tm.h> 26#include <asm/opal.h> 27#include <asm/xive-regs.h> 28#include <asm/thread_info.h> 29#include <asm/asm-compat.h> 30#include <asm/feature-fixups.h> 31#include <asm/cpuidle.h> 32#include <asm/ultravisor-api.h> 33 34/* Sign-extend HDEC if not on POWER9 */ 35#define EXTEND_HDEC(reg) \ 36BEGIN_FTR_SECTION; \ 37 extsw reg, reg; \ 38END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 39 40/* Values in HSTATE_NAPPING(r13) */ 41#define NAPPING_CEDE 1 42#define NAPPING_NOVCPU 2 43#define NAPPING_UNSPLIT 3 44 45/* Stack frame offsets for kvmppc_hv_entry */ 46#define SFS 208 47#define STACK_SLOT_TRAP (SFS-4) 48#define STACK_SLOT_SHORT_PATH (SFS-8) 49#define STACK_SLOT_TID (SFS-16) 50#define STACK_SLOT_PSSCR (SFS-24) 51#define STACK_SLOT_PID (SFS-32) 52#define STACK_SLOT_IAMR (SFS-40) 53#define STACK_SLOT_CIABR (SFS-48) 54#define STACK_SLOT_DAWR (SFS-56) 55#define STACK_SLOT_DAWRX (SFS-64) 56#define STACK_SLOT_HFSCR (SFS-72) 57#define STACK_SLOT_AMR (SFS-80) 58#define STACK_SLOT_UAMOR (SFS-88) 59/* the following is used by the P9 short path */ 60#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ 61 62/* 63 * Call kvmppc_hv_entry in real mode. 64 * Must be called with interrupts hard-disabled. 65 * 66 * Input Registers: 67 * 68 * LR = return address to continue at after eventually re-enabling MMU 69 */ 70_GLOBAL_TOC(kvmppc_hv_entry_trampoline) 71 mflr r0 72 std r0, PPC_LR_STKOFF(r1) 73 stdu r1, -112(r1) 74 mfmsr r10 75 std r10, HSTATE_HOST_MSR(r13) 76 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) 77 li r0,MSR_RI 78 andc r0,r10,r0 79 li r6,MSR_IR | MSR_DR 80 andc r6,r10,r6 81 mtmsrd r0,1 /* clear RI in MSR */ 82 mtsrr0 r5 83 mtsrr1 r6 84 RFI_TO_KERNEL 85 86kvmppc_call_hv_entry: 87BEGIN_FTR_SECTION 88 /* On P9, do LPCR setting, if necessary */ 89 ld r3, HSTATE_SPLIT_MODE(r13) 90 cmpdi r3, 0 91 beq 46f 92 lwz r4, KVM_SPLIT_DO_SET(r3) 93 cmpwi r4, 0 94 beq 46f 95 bl kvmhv_p9_set_lpcr 96 nop 9746: 98END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 99 100 ld r4, HSTATE_KVM_VCPU(r13) 101 bl kvmppc_hv_entry 102 103 /* Back from guest - restore host state and return to caller */ 104 105BEGIN_FTR_SECTION 106 /* Restore host DABR and DABRX */ 107 ld r5,HSTATE_DABR(r13) 108 li r6,7 109 mtspr SPRN_DABR,r5 110 mtspr SPRN_DABRX,r6 111END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 112 113 /* Restore SPRG3 */ 114 ld r3,PACA_SPRG_VDSO(r13) 115 mtspr SPRN_SPRG_VDSO_WRITE,r3 116 117 /* Reload the host's PMU registers */ 118 bl kvmhv_load_host_pmu 119 120 /* 121 * Reload DEC. HDEC interrupts were disabled when 122 * we reloaded the host's LPCR value. 123 */ 124 ld r3, HSTATE_DECEXP(r13) 125 mftb r4 126 subf r4, r4, r3 127 mtspr SPRN_DEC, r4 128 129 /* hwthread_req may have got set by cede or no vcpu, so clear it */ 130 li r0, 0 131 stb r0, HSTATE_HWTHREAD_REQ(r13) 132 133 /* 134 * For external interrupts we need to call the Linux 135 * handler to process the interrupt. We do that by jumping 136 * to absolute address 0x500 for external interrupts. 137 * The [h]rfid at the end of the handler will return to 138 * the book3s_hv_interrupts.S code. For other interrupts 139 * we do the rfid to get back to the book3s_hv_interrupts.S 140 * code here. 141 */ 142 ld r8, 112+PPC_LR_STKOFF(r1) 143 addi r1, r1, 112 144 ld r7, HSTATE_HOST_MSR(r13) 145 146 /* Return the trap number on this thread as the return value */ 147 mr r3, r12 148 149 /* 150 * If we came back from the guest via a relocation-on interrupt, 151 * we will be in virtual mode at this point, which makes it a 152 * little easier to get back to the caller. 153 */ 154 mfmsr r0 155 andi. r0, r0, MSR_IR /* in real mode? */ 156 bne .Lvirt_return 157 158 /* RFI into the highmem handler */ 159 mfmsr r6 160 li r0, MSR_RI 161 andc r6, r6, r0 162 mtmsrd r6, 1 /* Clear RI in MSR */ 163 mtsrr0 r8 164 mtsrr1 r7 165 RFI_TO_KERNEL 166 167 /* Virtual-mode return */ 168.Lvirt_return: 169 mtlr r8 170 blr 171 172kvmppc_primary_no_guest: 173 /* We handle this much like a ceded vcpu */ 174 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ 175 /* HDEC may be larger than DEC for arch >= v3.00, but since the */ 176 /* HDEC value came from DEC in the first place, it will fit */ 177 mfspr r3, SPRN_HDEC 178 mtspr SPRN_DEC, r3 179 /* 180 * Make sure the primary has finished the MMU switch. 181 * We should never get here on a secondary thread, but 182 * check it for robustness' sake. 183 */ 184 ld r5, HSTATE_KVM_VCORE(r13) 18565: lbz r0, VCORE_IN_GUEST(r5) 186 cmpwi r0, 0 187 beq 65b 188 /* Set LPCR. */ 189 ld r8,VCORE_LPCR(r5) 190 mtspr SPRN_LPCR,r8 191 isync 192 /* set our bit in napping_threads */ 193 ld r5, HSTATE_KVM_VCORE(r13) 194 lbz r7, HSTATE_PTID(r13) 195 li r0, 1 196 sld r0, r0, r7 197 addi r6, r5, VCORE_NAPPING_THREADS 1981: lwarx r3, 0, r6 199 or r3, r3, r0 200 stwcx. r3, 0, r6 201 bne 1b 202 /* order napping_threads update vs testing entry_exit_map */ 203 isync 204 li r12, 0 205 lwz r7, VCORE_ENTRY_EXIT(r5) 206 cmpwi r7, 0x100 207 bge kvm_novcpu_exit /* another thread already exiting */ 208 li r3, NAPPING_NOVCPU 209 stb r3, HSTATE_NAPPING(r13) 210 211 li r3, 0 /* Don't wake on privileged (OS) doorbell */ 212 b kvm_do_nap 213 214/* 215 * kvm_novcpu_wakeup 216 * Entered from kvm_start_guest if kvm_hstate.napping is set 217 * to NAPPING_NOVCPU 218 * r2 = kernel TOC 219 * r13 = paca 220 */ 221kvm_novcpu_wakeup: 222 ld r1, HSTATE_HOST_R1(r13) 223 ld r5, HSTATE_KVM_VCORE(r13) 224 li r0, 0 225 stb r0, HSTATE_NAPPING(r13) 226 227 /* check the wake reason */ 228 bl kvmppc_check_wake_reason 229 230 /* 231 * Restore volatile registers since we could have called 232 * a C routine in kvmppc_check_wake_reason. 233 * r5 = VCORE 234 */ 235 ld r5, HSTATE_KVM_VCORE(r13) 236 237 /* see if any other thread is already exiting */ 238 lwz r0, VCORE_ENTRY_EXIT(r5) 239 cmpwi r0, 0x100 240 bge kvm_novcpu_exit 241 242 /* clear our bit in napping_threads */ 243 lbz r7, HSTATE_PTID(r13) 244 li r0, 1 245 sld r0, r0, r7 246 addi r6, r5, VCORE_NAPPING_THREADS 2474: lwarx r7, 0, r6 248 andc r7, r7, r0 249 stwcx. r7, 0, r6 250 bne 4b 251 252 /* See if the wake reason means we need to exit */ 253 cmpdi r3, 0 254 bge kvm_novcpu_exit 255 256 /* See if our timeslice has expired (HDEC is negative) */ 257 mfspr r0, SPRN_HDEC 258 EXTEND_HDEC(r0) 259 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 260 cmpdi r0, 0 261 blt kvm_novcpu_exit 262 263 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 264 ld r4, HSTATE_KVM_VCPU(r13) 265 cmpdi r4, 0 266 beq kvmppc_primary_no_guest 267 268#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 269 addi r3, r4, VCPU_TB_RMENTRY 270 bl kvmhv_start_timing 271#endif 272 b kvmppc_got_guest 273 274kvm_novcpu_exit: 275#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 276 ld r4, HSTATE_KVM_VCPU(r13) 277 cmpdi r4, 0 278 beq 13f 279 addi r3, r4, VCPU_TB_RMEXIT 280 bl kvmhv_accumulate_time 281#endif 28213: mr r3, r12 283 stw r12, STACK_SLOT_TRAP(r1) 284 bl kvmhv_commence_exit 285 nop 286 b kvmhv_switch_to_host 287 288/* 289 * We come in here when wakened from Linux offline idle code. 290 * Relocation is off 291 * r3 contains the SRR1 wakeup value, SRR1 is trashed. 292 */ 293_GLOBAL(idle_kvm_start_guest) 294 ld r4,PACAEMERGSP(r13) 295 mfcr r5 296 mflr r0 297 std r1,0(r4) 298 std r5,8(r4) 299 std r0,16(r4) 300 subi r1,r4,STACK_FRAME_OVERHEAD 301 SAVE_NVGPRS(r1) 302 303 /* 304 * Could avoid this and pass it through in r3. For now, 305 * code expects it to be in SRR1. 306 */ 307 mtspr SPRN_SRR1,r3 308 309 li r0,0 310 stb r0,PACA_FTRACE_ENABLED(r13) 311 312 li r0,KVM_HWTHREAD_IN_KVM 313 stb r0,HSTATE_HWTHREAD_STATE(r13) 314 315 /* kvm cede / napping does not come through here */ 316 lbz r0,HSTATE_NAPPING(r13) 317 twnei r0,0 318 319 b 1f 320 321kvm_unsplit_wakeup: 322 li r0, 0 323 stb r0, HSTATE_NAPPING(r13) 324 3251: 326 327 /* 328 * We weren't napping due to cede, so this must be a secondary 329 * thread being woken up to run a guest, or being woken up due 330 * to a stray IPI. (Or due to some machine check or hypervisor 331 * maintenance interrupt while the core is in KVM.) 332 */ 333 334 /* Check the wake reason in SRR1 to see why we got here */ 335 bl kvmppc_check_wake_reason 336 /* 337 * kvmppc_check_wake_reason could invoke a C routine, but we 338 * have no volatile registers to restore when we return. 339 */ 340 341 cmpdi r3, 0 342 bge kvm_no_guest 343 344 /* get vcore pointer, NULL if we have nothing to run */ 345 ld r5,HSTATE_KVM_VCORE(r13) 346 cmpdi r5,0 347 /* if we have no vcore to run, go back to sleep */ 348 beq kvm_no_guest 349 350kvm_secondary_got_guest: 351 352 /* Set HSTATE_DSCR(r13) to something sensible */ 353 ld r6, PACA_DSCR_DEFAULT(r13) 354 std r6, HSTATE_DSCR(r13) 355 356 /* On thread 0 of a subcore, set HDEC to max */ 357 lbz r4, HSTATE_PTID(r13) 358 cmpwi r4, 0 359 bne 63f 360 LOAD_REG_ADDR(r6, decrementer_max) 361 ld r6, 0(r6) 362 mtspr SPRN_HDEC, r6 363 /* and set per-LPAR registers, if doing dynamic micro-threading */ 364 ld r6, HSTATE_SPLIT_MODE(r13) 365 cmpdi r6, 0 366 beq 63f 367BEGIN_FTR_SECTION 368 ld r0, KVM_SPLIT_RPR(r6) 369 mtspr SPRN_RPR, r0 370 ld r0, KVM_SPLIT_PMMAR(r6) 371 mtspr SPRN_PMMAR, r0 372 ld r0, KVM_SPLIT_LDBAR(r6) 373 mtspr SPRN_LDBAR, r0 374 isync 375FTR_SECTION_ELSE 376 /* On P9 we use the split_info for coordinating LPCR changes */ 377 lwz r4, KVM_SPLIT_DO_SET(r6) 378 cmpwi r4, 0 379 beq 1f 380 mr r3, r6 381 bl kvmhv_p9_set_lpcr 382 nop 3831: 384ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 38563: 386 /* Order load of vcpu after load of vcore */ 387 lwsync 388 ld r4, HSTATE_KVM_VCPU(r13) 389 bl kvmppc_hv_entry 390 391 /* Back from the guest, go back to nap */ 392 /* Clear our vcpu and vcore pointers so we don't come back in early */ 393 li r0, 0 394 std r0, HSTATE_KVM_VCPU(r13) 395 /* 396 * Once we clear HSTATE_KVM_VCORE(r13), the code in 397 * kvmppc_run_core() is going to assume that all our vcpu 398 * state is visible in memory. This lwsync makes sure 399 * that that is true. 400 */ 401 lwsync 402 std r0, HSTATE_KVM_VCORE(r13) 403 404 /* 405 * All secondaries exiting guest will fall through this path. 406 * Before proceeding, just check for HMI interrupt and 407 * invoke opal hmi handler. By now we are sure that the 408 * primary thread on this core/subcore has already made partition 409 * switch/TB resync and we are good to call opal hmi handler. 410 */ 411 cmpwi r12, BOOK3S_INTERRUPT_HMI 412 bne kvm_no_guest 413 414 li r3,0 /* NULL argument */ 415 bl hmi_exception_realmode 416/* 417 * At this point we have finished executing in the guest. 418 * We need to wait for hwthread_req to become zero, since 419 * we may not turn on the MMU while hwthread_req is non-zero. 420 * While waiting we also need to check if we get given a vcpu to run. 421 */ 422kvm_no_guest: 423 lbz r3, HSTATE_HWTHREAD_REQ(r13) 424 cmpwi r3, 0 425 bne 53f 426 HMT_MEDIUM 427 li r0, KVM_HWTHREAD_IN_KERNEL 428 stb r0, HSTATE_HWTHREAD_STATE(r13) 429 /* need to recheck hwthread_req after a barrier, to avoid race */ 430 sync 431 lbz r3, HSTATE_HWTHREAD_REQ(r13) 432 cmpwi r3, 0 433 bne 54f 434 435 /* 436 * Jump to idle_return_gpr_loss, which returns to the 437 * idle_kvm_start_guest caller. 438 */ 439 li r3, LPCR_PECE0 440 mfspr r4, SPRN_LPCR 441 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 442 mtspr SPRN_LPCR, r4 443 /* set up r3 for return */ 444 mfspr r3,SPRN_SRR1 445 REST_NVGPRS(r1) 446 addi r1, r1, STACK_FRAME_OVERHEAD 447 ld r0, 16(r1) 448 ld r5, 8(r1) 449 ld r1, 0(r1) 450 mtlr r0 451 mtcr r5 452 blr 453 45453: HMT_LOW 455 ld r5, HSTATE_KVM_VCORE(r13) 456 cmpdi r5, 0 457 bne 60f 458 ld r3, HSTATE_SPLIT_MODE(r13) 459 cmpdi r3, 0 460 beq kvm_no_guest 461 lwz r0, KVM_SPLIT_DO_SET(r3) 462 cmpwi r0, 0 463 bne kvmhv_do_set 464 lwz r0, KVM_SPLIT_DO_RESTORE(r3) 465 cmpwi r0, 0 466 bne kvmhv_do_restore 467 lbz r0, KVM_SPLIT_DO_NAP(r3) 468 cmpwi r0, 0 469 beq kvm_no_guest 470 HMT_MEDIUM 471 b kvm_unsplit_nap 47260: HMT_MEDIUM 473 b kvm_secondary_got_guest 474 47554: li r0, KVM_HWTHREAD_IN_KVM 476 stb r0, HSTATE_HWTHREAD_STATE(r13) 477 b kvm_no_guest 478 479kvmhv_do_set: 480 /* Set LPCR, LPIDR etc. on P9 */ 481 HMT_MEDIUM 482 bl kvmhv_p9_set_lpcr 483 nop 484 b kvm_no_guest 485 486kvmhv_do_restore: 487 HMT_MEDIUM 488 bl kvmhv_p9_restore_lpcr 489 nop 490 b kvm_no_guest 491 492/* 493 * Here the primary thread is trying to return the core to 494 * whole-core mode, so we need to nap. 495 */ 496kvm_unsplit_nap: 497 /* 498 * When secondaries are napping in kvm_unsplit_nap() with 499 * hwthread_req = 1, HMI goes ignored even though subcores are 500 * already exited the guest. Hence HMI keeps waking up secondaries 501 * from nap in a loop and secondaries always go back to nap since 502 * no vcore is assigned to them. This makes impossible for primary 503 * thread to get hold of secondary threads resulting into a soft 504 * lockup in KVM path. 505 * 506 * Let us check if HMI is pending and handle it before we go to nap. 507 */ 508 cmpwi r12, BOOK3S_INTERRUPT_HMI 509 bne 55f 510 li r3, 0 /* NULL argument */ 511 bl hmi_exception_realmode 51255: 513 /* 514 * Ensure that secondary doesn't nap when it has 515 * its vcore pointer set. 516 */ 517 sync /* matches smp_mb() before setting split_info.do_nap */ 518 ld r0, HSTATE_KVM_VCORE(r13) 519 cmpdi r0, 0 520 bne kvm_no_guest 521 /* clear any pending message */ 522BEGIN_FTR_SECTION 523 lis r6, (PPC_DBELL_SERVER << (63-36))@h 524 PPC_MSGCLR(6) 525END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 526 /* Set kvm_split_mode.napped[tid] = 1 */ 527 ld r3, HSTATE_SPLIT_MODE(r13) 528 li r0, 1 529 lbz r4, HSTATE_TID(r13) 530 addi r4, r4, KVM_SPLIT_NAPPED 531 stbx r0, r3, r4 532 /* Check the do_nap flag again after setting napped[] */ 533 sync 534 lbz r0, KVM_SPLIT_DO_NAP(r3) 535 cmpwi r0, 0 536 beq 57f 537 li r3, NAPPING_UNSPLIT 538 stb r3, HSTATE_NAPPING(r13) 539 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 540 mfspr r5, SPRN_LPCR 541 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) 542 b kvm_nap_sequence 543 54457: li r0, 0 545 stbx r0, r3, r4 546 b kvm_no_guest 547 548/****************************************************************************** 549 * * 550 * Entry code * 551 * * 552 *****************************************************************************/ 553 554.global kvmppc_hv_entry 555kvmppc_hv_entry: 556 557 /* Required state: 558 * 559 * R4 = vcpu pointer (or NULL) 560 * MSR = ~IR|DR 561 * R13 = PACA 562 * R1 = host R1 563 * R2 = TOC 564 * all other volatile GPRS = free 565 * Does not preserve non-volatile GPRs or CR fields 566 */ 567 mflr r0 568 std r0, PPC_LR_STKOFF(r1) 569 stdu r1, -SFS(r1) 570 571 /* Save R1 in the PACA */ 572 std r1, HSTATE_HOST_R1(r13) 573 574 li r6, KVM_GUEST_MODE_HOST_HV 575 stb r6, HSTATE_IN_GUEST(r13) 576 577#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 578 /* Store initial timestamp */ 579 cmpdi r4, 0 580 beq 1f 581 addi r3, r4, VCPU_TB_RMENTRY 582 bl kvmhv_start_timing 5831: 584#endif 585 586 ld r5, HSTATE_KVM_VCORE(r13) 587 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ 588 589 /* 590 * POWER7/POWER8 host -> guest partition switch code. 591 * We don't have to lock against concurrent tlbies, 592 * but we do have to coordinate across hardware threads. 593 */ 594 /* Set bit in entry map iff exit map is zero. */ 595 li r7, 1 596 lbz r6, HSTATE_PTID(r13) 597 sld r7, r7, r6 598 addi r8, r5, VCORE_ENTRY_EXIT 59921: lwarx r3, 0, r8 600 cmpwi r3, 0x100 /* any threads starting to exit? */ 601 bge secondary_too_late /* if so we're too late to the party */ 602 or r3, r3, r7 603 stwcx. r3, 0, r8 604 bne 21b 605 606 /* Primary thread switches to guest partition. */ 607 cmpwi r6,0 608 bne 10f 609 610 lwz r7,KVM_LPID(r9) 611BEGIN_FTR_SECTION 612 ld r6,KVM_SDR1(r9) 613 li r0,LPID_RSVD /* switch to reserved LPID */ 614 mtspr SPRN_LPID,r0 615 ptesync 616 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 617END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 618 mtspr SPRN_LPID,r7 619 isync 620 621 /* See if we need to flush the TLB. */ 622 mr r3, r9 /* kvm pointer */ 623 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ 624 li r5, 0 /* nested vcpu pointer */ 625 bl kvmppc_check_need_tlb_flush 626 nop 627 ld r5, HSTATE_KVM_VCORE(r13) 628 629 /* Add timebase offset onto timebase */ 63022: ld r8,VCORE_TB_OFFSET(r5) 631 cmpdi r8,0 632 beq 37f 633 std r8, VCORE_TB_OFFSET_APPL(r5) 634 mftb r6 /* current host timebase */ 635 add r8,r8,r6 636 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 637 mftb r7 /* check if lower 24 bits overflowed */ 638 clrldi r6,r6,40 639 clrldi r7,r7,40 640 cmpld r7,r6 641 bge 37f 642 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 643 mtspr SPRN_TBU40,r8 644 645 /* Load guest PCR value to select appropriate compat mode */ 64637: ld r7, VCORE_PCR(r5) 647 LOAD_REG_IMMEDIATE(r6, PCR_MASK) 648 cmpld r7, r6 649 beq 38f 650 or r7, r7, r6 651 mtspr SPRN_PCR, r7 65238: 653 654BEGIN_FTR_SECTION 655 /* DPDES and VTB are shared between threads */ 656 ld r8, VCORE_DPDES(r5) 657 ld r7, VCORE_VTB(r5) 658 mtspr SPRN_DPDES, r8 659 mtspr SPRN_VTB, r7 660END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 661 662 /* Mark the subcore state as inside guest */ 663 bl kvmppc_subcore_enter_guest 664 nop 665 ld r5, HSTATE_KVM_VCORE(r13) 666 ld r4, HSTATE_KVM_VCPU(r13) 667 li r0,1 668 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 669 670 /* Do we have a guest vcpu to run? */ 67110: cmpdi r4, 0 672 beq kvmppc_primary_no_guest 673kvmppc_got_guest: 674 /* Increment yield count if they have a VPA */ 675 ld r3, VCPU_VPA(r4) 676 cmpdi r3, 0 677 beq 25f 678 li r6, LPPACA_YIELDCOUNT 679 LWZX_BE r5, r3, r6 680 addi r5, r5, 1 681 STWX_BE r5, r3, r6 682 li r6, 1 683 stb r6, VCPU_VPA_DIRTY(r4) 68425: 685 686 /* Save purr/spurr */ 687 mfspr r5,SPRN_PURR 688 mfspr r6,SPRN_SPURR 689 std r5,HSTATE_PURR(r13) 690 std r6,HSTATE_SPURR(r13) 691 ld r7,VCPU_PURR(r4) 692 ld r8,VCPU_SPURR(r4) 693 mtspr SPRN_PURR,r7 694 mtspr SPRN_SPURR,r8 695 696 /* Save host values of some registers */ 697BEGIN_FTR_SECTION 698 mfspr r5, SPRN_TIDR 699 mfspr r6, SPRN_PSSCR 700 mfspr r7, SPRN_PID 701 std r5, STACK_SLOT_TID(r1) 702 std r6, STACK_SLOT_PSSCR(r1) 703 std r7, STACK_SLOT_PID(r1) 704 mfspr r5, SPRN_HFSCR 705 std r5, STACK_SLOT_HFSCR(r1) 706END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 707BEGIN_FTR_SECTION 708 mfspr r5, SPRN_CIABR 709 mfspr r6, SPRN_DAWR 710 mfspr r7, SPRN_DAWRX 711 mfspr r8, SPRN_IAMR 712 std r5, STACK_SLOT_CIABR(r1) 713 std r6, STACK_SLOT_DAWR(r1) 714 std r7, STACK_SLOT_DAWRX(r1) 715 std r8, STACK_SLOT_IAMR(r1) 716END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 717 718 mfspr r5, SPRN_AMR 719 std r5, STACK_SLOT_AMR(r1) 720 mfspr r6, SPRN_UAMOR 721 std r6, STACK_SLOT_UAMOR(r1) 722 723BEGIN_FTR_SECTION 724 /* Set partition DABR */ 725 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 726 lwz r5,VCPU_DABRX(r4) 727 ld r6,VCPU_DABR(r4) 728 mtspr SPRN_DABRX,r5 729 mtspr SPRN_DABR,r6 730 isync 731END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 732 733#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 734/* 735 * Branch around the call if both CPU_FTR_TM and 736 * CPU_FTR_P9_TM_HV_ASSIST are off. 737 */ 738BEGIN_FTR_SECTION 739 b 91f 740END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 741 /* 742 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 743 */ 744 mr r3, r4 745 ld r4, VCPU_MSR(r3) 746 li r5, 0 /* don't preserve non-vol regs */ 747 bl kvmppc_restore_tm_hv 748 nop 749 ld r4, HSTATE_KVM_VCPU(r13) 75091: 751#endif 752 753 /* Load guest PMU registers; r4 = vcpu pointer here */ 754 mr r3, r4 755 bl kvmhv_load_guest_pmu 756 757 /* Load up FP, VMX and VSX registers */ 758 ld r4, HSTATE_KVM_VCPU(r13) 759 bl kvmppc_load_fp 760 761 ld r14, VCPU_GPR(R14)(r4) 762 ld r15, VCPU_GPR(R15)(r4) 763 ld r16, VCPU_GPR(R16)(r4) 764 ld r17, VCPU_GPR(R17)(r4) 765 ld r18, VCPU_GPR(R18)(r4) 766 ld r19, VCPU_GPR(R19)(r4) 767 ld r20, VCPU_GPR(R20)(r4) 768 ld r21, VCPU_GPR(R21)(r4) 769 ld r22, VCPU_GPR(R22)(r4) 770 ld r23, VCPU_GPR(R23)(r4) 771 ld r24, VCPU_GPR(R24)(r4) 772 ld r25, VCPU_GPR(R25)(r4) 773 ld r26, VCPU_GPR(R26)(r4) 774 ld r27, VCPU_GPR(R27)(r4) 775 ld r28, VCPU_GPR(R28)(r4) 776 ld r29, VCPU_GPR(R29)(r4) 777 ld r30, VCPU_GPR(R30)(r4) 778 ld r31, VCPU_GPR(R31)(r4) 779 780 /* Switch DSCR to guest value */ 781 ld r5, VCPU_DSCR(r4) 782 mtspr SPRN_DSCR, r5 783 784BEGIN_FTR_SECTION 785 /* Skip next section on POWER7 */ 786 b 8f 787END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 788 /* Load up POWER8-specific registers */ 789 ld r5, VCPU_IAMR(r4) 790 lwz r6, VCPU_PSPB(r4) 791 ld r7, VCPU_FSCR(r4) 792 mtspr SPRN_IAMR, r5 793 mtspr SPRN_PSPB, r6 794 mtspr SPRN_FSCR, r7 795 /* 796 * Handle broken DAWR case by not writing it. This means we 797 * can still store the DAWR register for migration. 798 */ 799 LOAD_REG_ADDR(r5, dawr_force_enable) 800 lbz r5, 0(r5) 801 cmpdi r5, 0 802 beq 1f 803 ld r5, VCPU_DAWR(r4) 804 ld r6, VCPU_DAWRX(r4) 805 mtspr SPRN_DAWR, r5 806 mtspr SPRN_DAWRX, r6 8071: 808 ld r7, VCPU_CIABR(r4) 809 ld r8, VCPU_TAR(r4) 810 mtspr SPRN_CIABR, r7 811 mtspr SPRN_TAR, r8 812 ld r5, VCPU_IC(r4) 813 ld r8, VCPU_EBBHR(r4) 814 mtspr SPRN_IC, r5 815 mtspr SPRN_EBBHR, r8 816 ld r5, VCPU_EBBRR(r4) 817 ld r6, VCPU_BESCR(r4) 818 lwz r7, VCPU_GUEST_PID(r4) 819 ld r8, VCPU_WORT(r4) 820 mtspr SPRN_EBBRR, r5 821 mtspr SPRN_BESCR, r6 822 mtspr SPRN_PID, r7 823 mtspr SPRN_WORT, r8 824BEGIN_FTR_SECTION 825 /* POWER8-only registers */ 826 ld r5, VCPU_TCSCR(r4) 827 ld r6, VCPU_ACOP(r4) 828 ld r7, VCPU_CSIGR(r4) 829 ld r8, VCPU_TACR(r4) 830 mtspr SPRN_TCSCR, r5 831 mtspr SPRN_ACOP, r6 832 mtspr SPRN_CSIGR, r7 833 mtspr SPRN_TACR, r8 834 nop 835FTR_SECTION_ELSE 836 /* POWER9-only registers */ 837 ld r5, VCPU_TID(r4) 838 ld r6, VCPU_PSSCR(r4) 839 lbz r8, HSTATE_FAKE_SUSPEND(r13) 840 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */ 841 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG 842 ld r7, VCPU_HFSCR(r4) 843 mtspr SPRN_TIDR, r5 844 mtspr SPRN_PSSCR, r6 845 mtspr SPRN_HFSCR, r7 846ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 8478: 848 849 ld r5, VCPU_SPRG0(r4) 850 ld r6, VCPU_SPRG1(r4) 851 ld r7, VCPU_SPRG2(r4) 852 ld r8, VCPU_SPRG3(r4) 853 mtspr SPRN_SPRG0, r5 854 mtspr SPRN_SPRG1, r6 855 mtspr SPRN_SPRG2, r7 856 mtspr SPRN_SPRG3, r8 857 858 /* Load up DAR and DSISR */ 859 ld r5, VCPU_DAR(r4) 860 lwz r6, VCPU_DSISR(r4) 861 mtspr SPRN_DAR, r5 862 mtspr SPRN_DSISR, r6 863 864 /* Restore AMR and UAMOR, set AMOR to all 1s */ 865 ld r5,VCPU_AMR(r4) 866 ld r6,VCPU_UAMOR(r4) 867 li r7,-1 868 mtspr SPRN_AMR,r5 869 mtspr SPRN_UAMOR,r6 870 mtspr SPRN_AMOR,r7 871 872 /* Restore state of CTRL run bit; assume 1 on entry */ 873 lwz r5,VCPU_CTRL(r4) 874 andi. r5,r5,1 875 bne 4f 876 mfspr r6,SPRN_CTRLF 877 clrrdi r6,r6,1 878 mtspr SPRN_CTRLT,r6 8794: 880 /* Secondary threads wait for primary to have done partition switch */ 881 ld r5, HSTATE_KVM_VCORE(r13) 882 lbz r6, HSTATE_PTID(r13) 883 cmpwi r6, 0 884 beq 21f 885 lbz r0, VCORE_IN_GUEST(r5) 886 cmpwi r0, 0 887 bne 21f 888 HMT_LOW 88920: lwz r3, VCORE_ENTRY_EXIT(r5) 890 cmpwi r3, 0x100 891 bge no_switch_exit 892 lbz r0, VCORE_IN_GUEST(r5) 893 cmpwi r0, 0 894 beq 20b 895 HMT_MEDIUM 89621: 897 /* Set LPCR. */ 898 ld r8,VCORE_LPCR(r5) 899 mtspr SPRN_LPCR,r8 900 isync 901 902 /* 903 * Set the decrementer to the guest decrementer. 904 */ 905 ld r8,VCPU_DEC_EXPIRES(r4) 906 /* r8 is a host timebase value here, convert to guest TB */ 907 ld r5,HSTATE_KVM_VCORE(r13) 908 ld r6,VCORE_TB_OFFSET_APPL(r5) 909 add r8,r8,r6 910 mftb r7 911 subf r3,r7,r8 912 mtspr SPRN_DEC,r3 913 914 /* Check if HDEC expires soon */ 915 mfspr r3, SPRN_HDEC 916 EXTEND_HDEC(r3) 917 cmpdi r3, 512 /* 1 microsecond */ 918 blt hdec_soon 919 920 /* For hash guest, clear out and reload the SLB */ 921 ld r6, VCPU_KVM(r4) 922 lbz r0, KVM_RADIX(r6) 923 cmpwi r0, 0 924 bne 9f 925 li r6, 0 926 slbmte r6, r6 927 slbia 928 ptesync 929 930 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ 931 lwz r5,VCPU_SLB_MAX(r4) 932 cmpwi r5,0 933 beq 9f 934 mtctr r5 935 addi r6,r4,VCPU_SLB 9361: ld r8,VCPU_SLB_E(r6) 937 ld r9,VCPU_SLB_V(r6) 938 slbmte r9,r8 939 addi r6,r6,VCPU_SLB_SIZE 940 bdnz 1b 9419: 942 943#ifdef CONFIG_KVM_XICS 944 /* We are entering the guest on that thread, push VCPU to XIVE */ 945 ld r11, VCPU_XIVE_SAVED_STATE(r4) 946 li r9, TM_QW1_OS 947 lwz r8, VCPU_XIVE_CAM_WORD(r4) 948 cmpwi r8, 0 949 beq no_xive 950 li r7, TM_QW1_OS + TM_WORD2 951 mfmsr r0 952 andi. r0, r0, MSR_DR /* in real mode? */ 953 beq 2f 954 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 955 cmpldi cr1, r10, 0 956 beq cr1, no_xive 957 eieio 958 stdx r11,r9,r10 959 stwx r8,r7,r10 960 b 3f 9612: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 962 cmpldi cr1, r10, 0 963 beq cr1, no_xive 964 eieio 965 stdcix r11,r9,r10 966 stwcix r8,r7,r10 9673: li r9, 1 968 stb r9, VCPU_XIVE_PUSHED(r4) 969 eieio 970 971 /* 972 * We clear the irq_pending flag. There is a small chance of a 973 * race vs. the escalation interrupt happening on another 974 * processor setting it again, but the only consequence is to 975 * cause a spurrious wakeup on the next H_CEDE which is not an 976 * issue. 977 */ 978 li r0,0 979 stb r0, VCPU_IRQ_PENDING(r4) 980 981 /* 982 * In single escalation mode, if the escalation interrupt is 983 * on, we mask it. 984 */ 985 lbz r0, VCPU_XIVE_ESC_ON(r4) 986 cmpwi cr1, r0,0 987 beq cr1, 1f 988 li r9, XIVE_ESB_SET_PQ_01 989 beq 4f /* in real mode? */ 990 ld r10, VCPU_XIVE_ESC_VADDR(r4) 991 ldx r0, r10, r9 992 b 5f 9934: ld r10, VCPU_XIVE_ESC_RADDR(r4) 994 ldcix r0, r10, r9 9955: sync 996 997 /* We have a possible subtle race here: The escalation interrupt might 998 * have fired and be on its way to the host queue while we mask it, 999 * and if we unmask it early enough (re-cede right away), there is 1000 * a theorical possibility that it fires again, thus landing in the 1001 * target queue more than once which is a big no-no. 1002 * 1003 * Fortunately, solving this is rather easy. If the above load setting 1004 * PQ to 01 returns a previous value where P is set, then we know the 1005 * escalation interrupt is somewhere on its way to the host. In that 1006 * case we simply don't clear the xive_esc_on flag below. It will be 1007 * eventually cleared by the handler for the escalation interrupt. 1008 * 1009 * Then, when doing a cede, we check that flag again before re-enabling 1010 * the escalation interrupt, and if set, we abort the cede. 1011 */ 1012 andi. r0, r0, XIVE_ESB_VAL_P 1013 bne- 1f 1014 1015 /* Now P is 0, we can clear the flag */ 1016 li r0, 0 1017 stb r0, VCPU_XIVE_ESC_ON(r4) 10181: 1019no_xive: 1020#endif /* CONFIG_KVM_XICS */ 1021 1022 li r0, 0 1023 stw r0, STACK_SLOT_SHORT_PATH(r1) 1024 1025deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ 1026 /* Check if we can deliver an external or decrementer interrupt now */ 1027 ld r0, VCPU_PENDING_EXC(r4) 1028BEGIN_FTR_SECTION 1029 /* On POWER9, also check for emulated doorbell interrupt */ 1030 lbz r3, VCPU_DBELL_REQ(r4) 1031 or r0, r0, r3 1032END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1033 cmpdi r0, 0 1034 beq 71f 1035 mr r3, r4 1036 bl kvmppc_guest_entry_inject_int 1037 ld r4, HSTATE_KVM_VCPU(r13) 103871: 1039 ld r6, VCPU_SRR0(r4) 1040 ld r7, VCPU_SRR1(r4) 1041 mtspr SPRN_SRR0, r6 1042 mtspr SPRN_SRR1, r7 1043 1044fast_guest_entry_c: 1045 ld r10, VCPU_PC(r4) 1046 ld r11, VCPU_MSR(r4) 1047 /* r11 = vcpu->arch.msr & ~MSR_HV */ 1048 rldicl r11, r11, 63 - MSR_HV_LG, 1 1049 rotldi r11, r11, 1 + MSR_HV_LG 1050 ori r11, r11, MSR_ME 1051 1052 ld r6, VCPU_CTR(r4) 1053 ld r7, VCPU_XER(r4) 1054 mtctr r6 1055 mtxer r7 1056 1057/* 1058 * Required state: 1059 * R4 = vcpu 1060 * R10: value for HSRR0 1061 * R11: value for HSRR1 1062 * R13 = PACA 1063 */ 1064fast_guest_return: 1065 li r0,0 1066 stb r0,VCPU_CEDED(r4) /* cancel cede */ 1067 mtspr SPRN_HSRR0,r10 1068 mtspr SPRN_HSRR1,r11 1069 1070 /* Activate guest mode, so faults get handled by KVM */ 1071 li r9, KVM_GUEST_MODE_GUEST_HV 1072 stb r9, HSTATE_IN_GUEST(r13) 1073 1074#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1075 /* Accumulate timing */ 1076 addi r3, r4, VCPU_TB_GUEST 1077 bl kvmhv_accumulate_time 1078#endif 1079 1080 /* Enter guest */ 1081 1082BEGIN_FTR_SECTION 1083 ld r5, VCPU_CFAR(r4) 1084 mtspr SPRN_CFAR, r5 1085END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1086BEGIN_FTR_SECTION 1087 ld r0, VCPU_PPR(r4) 1088END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1089 1090 ld r5, VCPU_LR(r4) 1091 mtlr r5 1092 1093 ld r1, VCPU_GPR(R1)(r4) 1094 ld r5, VCPU_GPR(R5)(r4) 1095 ld r8, VCPU_GPR(R8)(r4) 1096 ld r9, VCPU_GPR(R9)(r4) 1097 ld r10, VCPU_GPR(R10)(r4) 1098 ld r11, VCPU_GPR(R11)(r4) 1099 ld r12, VCPU_GPR(R12)(r4) 1100 ld r13, VCPU_GPR(R13)(r4) 1101 1102BEGIN_FTR_SECTION 1103 mtspr SPRN_PPR, r0 1104END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1105 1106/* Move canary into DSISR to check for later */ 1107BEGIN_FTR_SECTION 1108 li r0, 0x7fff 1109 mtspr SPRN_HDSISR, r0 1110END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1111 1112 ld r6, VCPU_KVM(r4) 1113 lbz r7, KVM_SECURE_GUEST(r6) 1114 cmpdi r7, 0 1115 ld r6, VCPU_GPR(R6)(r4) 1116 ld r7, VCPU_GPR(R7)(r4) 1117 bne ret_to_ultra 1118 1119 lwz r0, VCPU_CR(r4) 1120 mtcr r0 1121 1122 ld r0, VCPU_GPR(R0)(r4) 1123 ld r2, VCPU_GPR(R2)(r4) 1124 ld r3, VCPU_GPR(R3)(r4) 1125 ld r4, VCPU_GPR(R4)(r4) 1126 HRFI_TO_GUEST 1127 b . 1128/* 1129 * Use UV_RETURN ultracall to return control back to the Ultravisor after 1130 * processing an hypercall or interrupt that was forwarded (a.k.a. reflected) 1131 * to the Hypervisor. 1132 * 1133 * All registers have already been loaded, except: 1134 * R0 = hcall result 1135 * R2 = SRR1, so UV can detect a synthesized interrupt (if any) 1136 * R3 = UV_RETURN 1137 */ 1138ret_to_ultra: 1139 lwz r0, VCPU_CR(r4) 1140 mtcr r0 1141 1142 ld r0, VCPU_GPR(R3)(r4) 1143 mfspr r2, SPRN_SRR1 1144 li r3, 0 1145 ori r3, r3, UV_RETURN 1146 ld r4, VCPU_GPR(R4)(r4) 1147 sc 2 1148 1149/* 1150 * Enter the guest on a P9 or later system where we have exactly 1151 * one vcpu per vcore and we don't need to go to real mode 1152 * (which implies that host and guest are both using radix MMU mode). 1153 * r3 = vcpu pointer 1154 * Most SPRs and all the VSRs have been loaded already. 1155 */ 1156_GLOBAL(__kvmhv_vcpu_entry_p9) 1157EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) 1158 mflr r0 1159 std r0, PPC_LR_STKOFF(r1) 1160 stdu r1, -SFS(r1) 1161 1162 li r0, 1 1163 stw r0, STACK_SLOT_SHORT_PATH(r1) 1164 1165 std r3, HSTATE_KVM_VCPU(r13) 1166 mfcr r4 1167 stw r4, SFS+8(r1) 1168 1169 std r1, HSTATE_HOST_R1(r13) 1170 1171 reg = 14 1172 .rept 18 1173 std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) 1174 reg = reg + 1 1175 .endr 1176 1177 reg = 14 1178 .rept 18 1179 ld reg, __VCPU_GPR(reg)(r3) 1180 reg = reg + 1 1181 .endr 1182 1183 mfmsr r10 1184 std r10, HSTATE_HOST_MSR(r13) 1185 1186 mr r4, r3 1187 b fast_guest_entry_c 1188guest_exit_short_path: 1189 1190 li r0, KVM_GUEST_MODE_NONE 1191 stb r0, HSTATE_IN_GUEST(r13) 1192 1193 reg = 14 1194 .rept 18 1195 std reg, __VCPU_GPR(reg)(r9) 1196 reg = reg + 1 1197 .endr 1198 1199 reg = 14 1200 .rept 18 1201 ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) 1202 reg = reg + 1 1203 .endr 1204 1205 lwz r4, SFS+8(r1) 1206 mtcr r4 1207 1208 mr r3, r12 /* trap number */ 1209 1210 addi r1, r1, SFS 1211 ld r0, PPC_LR_STKOFF(r1) 1212 mtlr r0 1213 1214 /* If we are in real mode, do a rfid to get back to the caller */ 1215 mfmsr r4 1216 andi. r5, r4, MSR_IR 1217 bnelr 1218 rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */ 1219 mtspr SPRN_SRR0, r0 1220 ld r10, HSTATE_HOST_MSR(r13) 1221 rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG 1222 mtspr SPRN_SRR1, r10 1223 RFI_TO_KERNEL 1224 b . 1225 1226secondary_too_late: 1227 li r12, 0 1228 stw r12, STACK_SLOT_TRAP(r1) 1229 cmpdi r4, 0 1230 beq 11f 1231 stw r12, VCPU_TRAP(r4) 1232#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1233 addi r3, r4, VCPU_TB_RMEXIT 1234 bl kvmhv_accumulate_time 1235#endif 123611: b kvmhv_switch_to_host 1237 1238no_switch_exit: 1239 HMT_MEDIUM 1240 li r12, 0 1241 b 12f 1242hdec_soon: 1243 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 124412: stw r12, VCPU_TRAP(r4) 1245 mr r9, r4 1246#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1247 addi r3, r4, VCPU_TB_RMEXIT 1248 bl kvmhv_accumulate_time 1249#endif 1250 b guest_bypass 1251 1252/****************************************************************************** 1253 * * 1254 * Exit code * 1255 * * 1256 *****************************************************************************/ 1257 1258/* 1259 * We come here from the first-level interrupt handlers. 1260 */ 1261 .globl kvmppc_interrupt_hv 1262kvmppc_interrupt_hv: 1263 /* 1264 * Register contents: 1265 * R12 = (guest CR << 32) | interrupt vector 1266 * R13 = PACA 1267 * guest R12 saved in shadow VCPU SCRATCH0 1268 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE 1269 * guest R13 saved in SPRN_SCRATCH0 1270 */ 1271 std r9, HSTATE_SCRATCH2(r13) 1272 lbz r9, HSTATE_IN_GUEST(r13) 1273 cmpwi r9, KVM_GUEST_MODE_HOST_HV 1274 beq kvmppc_bad_host_intr 1275#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1276 cmpwi r9, KVM_GUEST_MODE_GUEST 1277 ld r9, HSTATE_SCRATCH2(r13) 1278 beq kvmppc_interrupt_pr 1279#endif 1280 /* We're now back in the host but in guest MMU context */ 1281 li r9, KVM_GUEST_MODE_HOST_HV 1282 stb r9, HSTATE_IN_GUEST(r13) 1283 1284 ld r9, HSTATE_KVM_VCPU(r13) 1285 1286 /* Save registers */ 1287 1288 std r0, VCPU_GPR(R0)(r9) 1289 std r1, VCPU_GPR(R1)(r9) 1290 std r2, VCPU_GPR(R2)(r9) 1291 std r3, VCPU_GPR(R3)(r9) 1292 std r4, VCPU_GPR(R4)(r9) 1293 std r5, VCPU_GPR(R5)(r9) 1294 std r6, VCPU_GPR(R6)(r9) 1295 std r7, VCPU_GPR(R7)(r9) 1296 std r8, VCPU_GPR(R8)(r9) 1297 ld r0, HSTATE_SCRATCH2(r13) 1298 std r0, VCPU_GPR(R9)(r9) 1299 std r10, VCPU_GPR(R10)(r9) 1300 std r11, VCPU_GPR(R11)(r9) 1301 ld r3, HSTATE_SCRATCH0(r13) 1302 std r3, VCPU_GPR(R12)(r9) 1303 /* CR is in the high half of r12 */ 1304 srdi r4, r12, 32 1305 std r4, VCPU_CR(r9) 1306BEGIN_FTR_SECTION 1307 ld r3, HSTATE_CFAR(r13) 1308 std r3, VCPU_CFAR(r9) 1309END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1310BEGIN_FTR_SECTION 1311 ld r4, HSTATE_PPR(r13) 1312 std r4, VCPU_PPR(r9) 1313END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1314 1315 /* Restore R1/R2 so we can handle faults */ 1316 ld r1, HSTATE_HOST_R1(r13) 1317 ld r2, PACATOC(r13) 1318 1319 mfspr r10, SPRN_SRR0 1320 mfspr r11, SPRN_SRR1 1321 std r10, VCPU_SRR0(r9) 1322 std r11, VCPU_SRR1(r9) 1323 /* trap is in the low half of r12, clear CR from the high half */ 1324 clrldi r12, r12, 32 1325 andi. r0, r12, 2 /* need to read HSRR0/1? */ 1326 beq 1f 1327 mfspr r10, SPRN_HSRR0 1328 mfspr r11, SPRN_HSRR1 1329 clrrdi r12, r12, 2 13301: std r10, VCPU_PC(r9) 1331 std r11, VCPU_MSR(r9) 1332 1333 GET_SCRATCH0(r3) 1334 mflr r4 1335 std r3, VCPU_GPR(R13)(r9) 1336 std r4, VCPU_LR(r9) 1337 1338 stw r12,VCPU_TRAP(r9) 1339 1340 /* 1341 * Now that we have saved away SRR0/1 and HSRR0/1, 1342 * interrupts are recoverable in principle, so set MSR_RI. 1343 * This becomes important for relocation-on interrupts from 1344 * the guest, which we can get in radix mode on POWER9. 1345 */ 1346 li r0, MSR_RI 1347 mtmsrd r0, 1 1348 1349#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1350 addi r3, r9, VCPU_TB_RMINTR 1351 mr r4, r9 1352 bl kvmhv_accumulate_time 1353 ld r5, VCPU_GPR(R5)(r9) 1354 ld r6, VCPU_GPR(R6)(r9) 1355 ld r7, VCPU_GPR(R7)(r9) 1356 ld r8, VCPU_GPR(R8)(r9) 1357#endif 1358 1359 /* Save HEIR (HV emulation assist reg) in emul_inst 1360 if this is an HEI (HV emulation interrupt, e40) */ 1361 li r3,KVM_INST_FETCH_FAILED 1362 stw r3,VCPU_LAST_INST(r9) 1363 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1364 bne 11f 1365 mfspr r3,SPRN_HEIR 136611: stw r3,VCPU_HEIR(r9) 1367 1368 /* these are volatile across C function calls */ 1369#ifdef CONFIG_RELOCATABLE 1370 ld r3, HSTATE_SCRATCH1(r13) 1371 mtctr r3 1372#else 1373 mfctr r3 1374#endif 1375 mfxer r4 1376 std r3, VCPU_CTR(r9) 1377 std r4, VCPU_XER(r9) 1378 1379 /* Save more register state */ 1380 mfdar r3 1381 mfdsisr r4 1382 std r3, VCPU_DAR(r9) 1383 stw r4, VCPU_DSISR(r9) 1384 1385 /* If this is a page table miss then see if it's theirs or ours */ 1386 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1387 beq kvmppc_hdsi 1388 std r3, VCPU_FAULT_DAR(r9) 1389 stw r4, VCPU_FAULT_DSISR(r9) 1390 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1391 beq kvmppc_hisi 1392 1393#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1394 /* For softpatch interrupt, go off and do TM instruction emulation */ 1395 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 1396 beq kvmppc_tm_emul 1397#endif 1398 1399 /* See if this is a leftover HDEC interrupt */ 1400 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1401 bne 2f 1402 mfspr r3,SPRN_HDEC 1403 EXTEND_HDEC(r3) 1404 cmpdi r3,0 1405 mr r4,r9 1406 bge fast_guest_return 14072: 1408 /* See if this is an hcall we can handle in real mode */ 1409 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1410 beq hcall_try_real_mode 1411 1412 /* Hypervisor doorbell - exit only if host IPI flag set */ 1413 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL 1414 bne 3f 1415BEGIN_FTR_SECTION 1416 PPC_MSGSYNC 1417 lwsync 1418 /* always exit if we're running a nested guest */ 1419 ld r0, VCPU_NESTED(r9) 1420 cmpdi r0, 0 1421 bne guest_exit_cont 1422END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1423 lbz r0, HSTATE_HOST_IPI(r13) 1424 cmpwi r0, 0 1425 beq maybe_reenter_guest 1426 b guest_exit_cont 14273: 1428 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ 1429 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL 1430 bne 14f 1431 mfspr r3, SPRN_HFSCR 1432 std r3, VCPU_HFSCR(r9) 1433 b guest_exit_cont 143414: 1435 /* External interrupt ? */ 1436 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1437 beq kvmppc_guest_external 1438 /* See if it is a machine check */ 1439 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1440 beq machine_check_realmode 1441 /* Or a hypervisor maintenance interrupt */ 1442 cmpwi r12, BOOK3S_INTERRUPT_HMI 1443 beq hmi_realmode 1444 1445guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1446 1447#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1448 addi r3, r9, VCPU_TB_RMEXIT 1449 mr r4, r9 1450 bl kvmhv_accumulate_time 1451#endif 1452#ifdef CONFIG_KVM_XICS 1453 /* We are exiting, pull the VP from the XIVE */ 1454 lbz r0, VCPU_XIVE_PUSHED(r9) 1455 cmpwi cr0, r0, 0 1456 beq 1f 1457 li r7, TM_SPC_PULL_OS_CTX 1458 li r6, TM_QW1_OS 1459 mfmsr r0 1460 andi. r0, r0, MSR_DR /* in real mode? */ 1461 beq 2f 1462 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 1463 cmpldi cr0, r10, 0 1464 beq 1f 1465 /* First load to pull the context, we ignore the value */ 1466 eieio 1467 lwzx r11, r7, r10 1468 /* Second load to recover the context state (Words 0 and 1) */ 1469 ldx r11, r6, r10 1470 b 3f 14712: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 1472 cmpldi cr0, r10, 0 1473 beq 1f 1474 /* First load to pull the context, we ignore the value */ 1475 eieio 1476 lwzcix r11, r7, r10 1477 /* Second load to recover the context state (Words 0 and 1) */ 1478 ldcix r11, r6, r10 14793: std r11, VCPU_XIVE_SAVED_STATE(r9) 1480 /* Fixup some of the state for the next load */ 1481 li r10, 0 1482 li r0, 0xff 1483 stb r10, VCPU_XIVE_PUSHED(r9) 1484 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1485 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1486 eieio 14871: 1488#endif /* CONFIG_KVM_XICS */ 1489 1490 /* If we came in through the P9 short path, go back out to C now */ 1491 lwz r0, STACK_SLOT_SHORT_PATH(r1) 1492 cmpwi r0, 0 1493 bne guest_exit_short_path 1494 1495 /* For hash guest, read the guest SLB and save it away */ 1496 ld r5, VCPU_KVM(r9) 1497 lbz r0, KVM_RADIX(r5) 1498 li r5, 0 1499 cmpwi r0, 0 1500 bne 3f /* for radix, save 0 entries */ 1501 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1502 mtctr r0 1503 li r6,0 1504 addi r7,r9,VCPU_SLB 15051: slbmfee r8,r6 1506 andis. r0,r8,SLB_ESID_V@h 1507 beq 2f 1508 add r8,r8,r6 /* put index in */ 1509 slbmfev r3,r6 1510 std r8,VCPU_SLB_E(r7) 1511 std r3,VCPU_SLB_V(r7) 1512 addi r7,r7,VCPU_SLB_SIZE 1513 addi r5,r5,1 15142: addi r6,r6,1 1515 bdnz 1b 1516 /* Finally clear out the SLB */ 1517 li r0,0 1518 slbmte r0,r0 1519 slbia 1520 ptesync 15213: stw r5,VCPU_SLB_MAX(r9) 1522 1523 /* load host SLB entries */ 1524BEGIN_MMU_FTR_SECTION 1525 b 0f 1526END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 1527 ld r8,PACA_SLBSHADOWPTR(r13) 1528 1529 .rept SLB_NUM_BOLTED 1530 li r3, SLBSHADOW_SAVEAREA 1531 LDX_BE r5, r8, r3 1532 addi r3, r3, 8 1533 LDX_BE r6, r8, r3 1534 andis. r7,r5,SLB_ESID_V@h 1535 beq 1f 1536 slbmte r6,r5 15371: addi r8,r8,16 1538 .endr 15390: 1540 1541guest_bypass: 1542 stw r12, STACK_SLOT_TRAP(r1) 1543 1544 /* Save DEC */ 1545 /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1546 ld r3, HSTATE_KVM_VCORE(r13) 1547 mfspr r5,SPRN_DEC 1548 mftb r6 1549 /* On P9, if the guest has large decr enabled, don't sign extend */ 1550BEGIN_FTR_SECTION 1551 ld r4, VCORE_LPCR(r3) 1552 andis. r4, r4, LPCR_LD@h 1553 bne 16f 1554END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1555 extsw r5,r5 155616: add r5,r5,r6 1557 /* r5 is a guest timebase value here, convert to host TB */ 1558 ld r4,VCORE_TB_OFFSET_APPL(r3) 1559 subf r5,r4,r5 1560 std r5,VCPU_DEC_EXPIRES(r9) 1561 1562 /* Increment exit count, poke other threads to exit */ 1563 mr r3, r12 1564 bl kvmhv_commence_exit 1565 nop 1566 ld r9, HSTATE_KVM_VCPU(r13) 1567 1568 /* Stop others sending VCPU interrupts to this physical CPU */ 1569 li r0, -1 1570 stw r0, VCPU_CPU(r9) 1571 stw r0, VCPU_THREAD_CPU(r9) 1572 1573 /* Save guest CTRL register, set runlatch to 1 */ 1574 mfspr r6,SPRN_CTRLF 1575 stw r6,VCPU_CTRL(r9) 1576 andi. r0,r6,1 1577 bne 4f 1578 ori r6,r6,1 1579 mtspr SPRN_CTRLT,r6 15804: 1581 /* 1582 * Save the guest PURR/SPURR 1583 */ 1584 mfspr r5,SPRN_PURR 1585 mfspr r6,SPRN_SPURR 1586 ld r7,VCPU_PURR(r9) 1587 ld r8,VCPU_SPURR(r9) 1588 std r5,VCPU_PURR(r9) 1589 std r6,VCPU_SPURR(r9) 1590 subf r5,r7,r5 1591 subf r6,r8,r6 1592 1593 /* 1594 * Restore host PURR/SPURR and add guest times 1595 * so that the time in the guest gets accounted. 1596 */ 1597 ld r3,HSTATE_PURR(r13) 1598 ld r4,HSTATE_SPURR(r13) 1599 add r3,r3,r5 1600 add r4,r4,r6 1601 mtspr SPRN_PURR,r3 1602 mtspr SPRN_SPURR,r4 1603 1604BEGIN_FTR_SECTION 1605 b 8f 1606END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1607 /* Save POWER8-specific registers */ 1608 mfspr r5, SPRN_IAMR 1609 mfspr r6, SPRN_PSPB 1610 mfspr r7, SPRN_FSCR 1611 std r5, VCPU_IAMR(r9) 1612 stw r6, VCPU_PSPB(r9) 1613 std r7, VCPU_FSCR(r9) 1614 mfspr r5, SPRN_IC 1615 mfspr r7, SPRN_TAR 1616 std r5, VCPU_IC(r9) 1617 std r7, VCPU_TAR(r9) 1618 mfspr r8, SPRN_EBBHR 1619 std r8, VCPU_EBBHR(r9) 1620 mfspr r5, SPRN_EBBRR 1621 mfspr r6, SPRN_BESCR 1622 mfspr r7, SPRN_PID 1623 mfspr r8, SPRN_WORT 1624 std r5, VCPU_EBBRR(r9) 1625 std r6, VCPU_BESCR(r9) 1626 stw r7, VCPU_GUEST_PID(r9) 1627 std r8, VCPU_WORT(r9) 1628BEGIN_FTR_SECTION 1629 mfspr r5, SPRN_TCSCR 1630 mfspr r6, SPRN_ACOP 1631 mfspr r7, SPRN_CSIGR 1632 mfspr r8, SPRN_TACR 1633 std r5, VCPU_TCSCR(r9) 1634 std r6, VCPU_ACOP(r9) 1635 std r7, VCPU_CSIGR(r9) 1636 std r8, VCPU_TACR(r9) 1637FTR_SECTION_ELSE 1638 mfspr r5, SPRN_TIDR 1639 mfspr r6, SPRN_PSSCR 1640 std r5, VCPU_TID(r9) 1641 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */ 1642 rotldi r6, r6, 60 1643 std r6, VCPU_PSSCR(r9) 1644 /* Restore host HFSCR value */ 1645 ld r7, STACK_SLOT_HFSCR(r1) 1646 mtspr SPRN_HFSCR, r7 1647ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 1648 /* 1649 * Restore various registers to 0, where non-zero values 1650 * set by the guest could disrupt the host. 1651 */ 1652 li r0, 0 1653 mtspr SPRN_PSPB, r0 1654 mtspr SPRN_WORT, r0 1655BEGIN_FTR_SECTION 1656 mtspr SPRN_TCSCR, r0 1657 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1658 li r0, 1 1659 sldi r0, r0, 31 1660 mtspr SPRN_MMCRS, r0 1661END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 1662 1663 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ 1664 ld r8, STACK_SLOT_IAMR(r1) 1665 mtspr SPRN_IAMR, r8 1666 16678: /* Power7 jumps back in here */ 1668 mfspr r5,SPRN_AMR 1669 mfspr r6,SPRN_UAMOR 1670 std r5,VCPU_AMR(r9) 1671 std r6,VCPU_UAMOR(r9) 1672 ld r5,STACK_SLOT_AMR(r1) 1673 ld r6,STACK_SLOT_UAMOR(r1) 1674 mtspr SPRN_AMR, r5 1675 mtspr SPRN_UAMOR, r6 1676 1677 /* Switch DSCR back to host value */ 1678 mfspr r8, SPRN_DSCR 1679 ld r7, HSTATE_DSCR(r13) 1680 std r8, VCPU_DSCR(r9) 1681 mtspr SPRN_DSCR, r7 1682 1683 /* Save non-volatile GPRs */ 1684 std r14, VCPU_GPR(R14)(r9) 1685 std r15, VCPU_GPR(R15)(r9) 1686 std r16, VCPU_GPR(R16)(r9) 1687 std r17, VCPU_GPR(R17)(r9) 1688 std r18, VCPU_GPR(R18)(r9) 1689 std r19, VCPU_GPR(R19)(r9) 1690 std r20, VCPU_GPR(R20)(r9) 1691 std r21, VCPU_GPR(R21)(r9) 1692 std r22, VCPU_GPR(R22)(r9) 1693 std r23, VCPU_GPR(R23)(r9) 1694 std r24, VCPU_GPR(R24)(r9) 1695 std r25, VCPU_GPR(R25)(r9) 1696 std r26, VCPU_GPR(R26)(r9) 1697 std r27, VCPU_GPR(R27)(r9) 1698 std r28, VCPU_GPR(R28)(r9) 1699 std r29, VCPU_GPR(R29)(r9) 1700 std r30, VCPU_GPR(R30)(r9) 1701 std r31, VCPU_GPR(R31)(r9) 1702 1703 /* Save SPRGs */ 1704 mfspr r3, SPRN_SPRG0 1705 mfspr r4, SPRN_SPRG1 1706 mfspr r5, SPRN_SPRG2 1707 mfspr r6, SPRN_SPRG3 1708 std r3, VCPU_SPRG0(r9) 1709 std r4, VCPU_SPRG1(r9) 1710 std r5, VCPU_SPRG2(r9) 1711 std r6, VCPU_SPRG3(r9) 1712 1713 /* save FP state */ 1714 mr r3, r9 1715 bl kvmppc_save_fp 1716 1717#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1718/* 1719 * Branch around the call if both CPU_FTR_TM and 1720 * CPU_FTR_P9_TM_HV_ASSIST are off. 1721 */ 1722BEGIN_FTR_SECTION 1723 b 91f 1724END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 1725 /* 1726 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 1727 */ 1728 mr r3, r9 1729 ld r4, VCPU_MSR(r3) 1730 li r5, 0 /* don't preserve non-vol regs */ 1731 bl kvmppc_save_tm_hv 1732 nop 1733 ld r9, HSTATE_KVM_VCPU(r13) 173491: 1735#endif 1736 1737 /* Increment yield count if they have a VPA */ 1738 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1739 cmpdi r8, 0 1740 beq 25f 1741 li r4, LPPACA_YIELDCOUNT 1742 LWZX_BE r3, r8, r4 1743 addi r3, r3, 1 1744 STWX_BE r3, r8, r4 1745 li r3, 1 1746 stb r3, VCPU_VPA_DIRTY(r9) 174725: 1748 /* Save PMU registers if requested */ 1749 /* r8 and cr0.eq are live here */ 1750 mr r3, r9 1751 li r4, 1 1752 beq 21f /* if no VPA, save PMU stuff anyway */ 1753 lbz r4, LPPACA_PMCINUSE(r8) 175421: bl kvmhv_save_guest_pmu 1755 ld r9, HSTATE_KVM_VCPU(r13) 1756 1757 /* Restore host values of some registers */ 1758BEGIN_FTR_SECTION 1759 ld r5, STACK_SLOT_CIABR(r1) 1760 ld r6, STACK_SLOT_DAWR(r1) 1761 ld r7, STACK_SLOT_DAWRX(r1) 1762 mtspr SPRN_CIABR, r5 1763 /* 1764 * If the DAWR doesn't work, it's ok to write these here as 1765 * this value should always be zero 1766 */ 1767 mtspr SPRN_DAWR, r6 1768 mtspr SPRN_DAWRX, r7 1769END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1770BEGIN_FTR_SECTION 1771 ld r5, STACK_SLOT_TID(r1) 1772 ld r6, STACK_SLOT_PSSCR(r1) 1773 ld r7, STACK_SLOT_PID(r1) 1774 mtspr SPRN_TIDR, r5 1775 mtspr SPRN_PSSCR, r6 1776 mtspr SPRN_PID, r7 1777END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1778 1779#ifdef CONFIG_PPC_RADIX_MMU 1780 /* 1781 * Are we running hash or radix ? 1782 */ 1783 ld r5, VCPU_KVM(r9) 1784 lbz r0, KVM_RADIX(r5) 1785 cmpwi cr2, r0, 0 1786 beq cr2, 2f 1787 1788 /* 1789 * Radix: do eieio; tlbsync; ptesync sequence in case we 1790 * interrupted the guest between a tlbie and a ptesync. 1791 */ 1792 eieio 1793 tlbsync 1794 ptesync 1795 1796 /* Radix: Handle the case where the guest used an illegal PID */ 1797 LOAD_REG_ADDR(r4, mmu_base_pid) 1798 lwz r3, VCPU_GUEST_PID(r9) 1799 lwz r5, 0(r4) 1800 cmpw cr0,r3,r5 1801 blt 2f 1802 1803 /* 1804 * Illegal PID, the HW might have prefetched and cached in the TLB 1805 * some translations for the LPID 0 / guest PID combination which 1806 * Linux doesn't know about, so we need to flush that PID out of 1807 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to 1808 * the right context. 1809 */ 1810 li r0,0 1811 mtspr SPRN_LPID,r0 1812 isync 1813 1814 /* Then do a congruence class local flush */ 1815 ld r6,VCPU_KVM(r9) 1816 lwz r0,KVM_TLB_SETS(r6) 1817 mtctr r0 1818 li r7,0x400 /* IS field = 0b01 */ 1819 ptesync 1820 sldi r0,r3,32 /* RS has PID */ 18211: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ 1822 addi r7,r7,0x1000 1823 bdnz 1b 1824 ptesync 1825 18262: 1827#endif /* CONFIG_PPC_RADIX_MMU */ 1828 1829 /* 1830 * POWER7/POWER8 guest -> host partition switch code. 1831 * We don't have to lock against tlbies but we do 1832 * have to coordinate the hardware threads. 1833 * Here STACK_SLOT_TRAP(r1) contains the trap number. 1834 */ 1835kvmhv_switch_to_host: 1836 /* Secondary threads wait for primary to do partition switch */ 1837 ld r5,HSTATE_KVM_VCORE(r13) 1838 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1839 lbz r3,HSTATE_PTID(r13) 1840 cmpwi r3,0 1841 beq 15f 1842 HMT_LOW 184313: lbz r3,VCORE_IN_GUEST(r5) 1844 cmpwi r3,0 1845 bne 13b 1846 HMT_MEDIUM 1847 b 16f 1848 1849 /* Primary thread waits for all the secondaries to exit guest */ 185015: lwz r3,VCORE_ENTRY_EXIT(r5) 1851 rlwinm r0,r3,32-8,0xff 1852 clrldi r3,r3,56 1853 cmpw r3,r0 1854 bne 15b 1855 isync 1856 1857 /* Did we actually switch to the guest at all? */ 1858 lbz r6, VCORE_IN_GUEST(r5) 1859 cmpwi r6, 0 1860 beq 19f 1861 1862 /* Primary thread switches back to host partition */ 1863 lwz r7,KVM_HOST_LPID(r4) 1864BEGIN_FTR_SECTION 1865 ld r6,KVM_HOST_SDR1(r4) 1866 li r8,LPID_RSVD /* switch to reserved LPID */ 1867 mtspr SPRN_LPID,r8 1868 ptesync 1869 mtspr SPRN_SDR1,r6 /* switch to host page table */ 1870END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 1871 mtspr SPRN_LPID,r7 1872 isync 1873 1874BEGIN_FTR_SECTION 1875 /* DPDES and VTB are shared between threads */ 1876 mfspr r7, SPRN_DPDES 1877 mfspr r8, SPRN_VTB 1878 std r7, VCORE_DPDES(r5) 1879 std r8, VCORE_VTB(r5) 1880 /* clear DPDES so we don't get guest doorbells in the host */ 1881 li r8, 0 1882 mtspr SPRN_DPDES, r8 1883END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1884 1885 /* Subtract timebase offset from timebase */ 1886 ld r8, VCORE_TB_OFFSET_APPL(r5) 1887 cmpdi r8,0 1888 beq 17f 1889 li r0, 0 1890 std r0, VCORE_TB_OFFSET_APPL(r5) 1891 mftb r6 /* current guest timebase */ 1892 subf r8,r8,r6 1893 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 1894 mftb r7 /* check if lower 24 bits overflowed */ 1895 clrldi r6,r6,40 1896 clrldi r7,r7,40 1897 cmpld r7,r6 1898 bge 17f 1899 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 1900 mtspr SPRN_TBU40,r8 1901 190217: 1903 /* 1904 * If this is an HMI, we called kvmppc_realmode_hmi_handler 1905 * above, which may or may not have already called 1906 * kvmppc_subcore_exit_guest. Fortunately, all that 1907 * kvmppc_subcore_exit_guest does is clear a flag, so calling 1908 * it again here is benign even if kvmppc_realmode_hmi_handler 1909 * has already called it. 1910 */ 1911 bl kvmppc_subcore_exit_guest 1912 nop 191330: ld r5,HSTATE_KVM_VCORE(r13) 1914 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1915 1916 /* Reset PCR */ 1917 ld r0, VCORE_PCR(r5) 1918 LOAD_REG_IMMEDIATE(r6, PCR_MASK) 1919 cmpld r0, r6 1920 beq 18f 1921 mtspr SPRN_PCR, r6 192218: 1923 /* Signal secondary CPUs to continue */ 1924 stb r0,VCORE_IN_GUEST(r5) 192519: lis r8,0x7fff /* MAX_INT@h */ 1926 mtspr SPRN_HDEC,r8 1927 192816: 1929BEGIN_FTR_SECTION 1930 /* On POWER9 with HPT-on-radix we need to wait for all other threads */ 1931 ld r3, HSTATE_SPLIT_MODE(r13) 1932 cmpdi r3, 0 1933 beq 47f 1934 lwz r8, KVM_SPLIT_DO_RESTORE(r3) 1935 cmpwi r8, 0 1936 beq 47f 1937 bl kvmhv_p9_restore_lpcr 1938 nop 1939 b 48f 194047: 1941END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1942 ld r8,KVM_HOST_LPCR(r4) 1943 mtspr SPRN_LPCR,r8 1944 isync 194548: 1946#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1947 /* Finish timing, if we have a vcpu */ 1948 ld r4, HSTATE_KVM_VCPU(r13) 1949 cmpdi r4, 0 1950 li r3, 0 1951 beq 2f 1952 bl kvmhv_accumulate_time 19532: 1954#endif 1955 /* Unset guest mode */ 1956 li r0, KVM_GUEST_MODE_NONE 1957 stb r0, HSTATE_IN_GUEST(r13) 1958 1959 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ 1960 ld r0, SFS+PPC_LR_STKOFF(r1) 1961 addi r1, r1, SFS 1962 mtlr r0 1963 blr 1964 1965kvmppc_guest_external: 1966 /* External interrupt, first check for host_ipi. If this is 1967 * set, we know the host wants us out so let's do it now 1968 */ 1969 bl kvmppc_read_intr 1970 1971 /* 1972 * Restore the active volatile registers after returning from 1973 * a C function. 1974 */ 1975 ld r9, HSTATE_KVM_VCPU(r13) 1976 li r12, BOOK3S_INTERRUPT_EXTERNAL 1977 1978 /* 1979 * kvmppc_read_intr return codes: 1980 * 1981 * Exit to host (r3 > 0) 1982 * 1 An interrupt is pending that needs to be handled by the host 1983 * Exit guest and return to host by branching to guest_exit_cont 1984 * 1985 * 2 Passthrough that needs completion in the host 1986 * Exit guest and return to host by branching to guest_exit_cont 1987 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD 1988 * to indicate to the host to complete handling the interrupt 1989 * 1990 * Before returning to guest, we check if any CPU is heading out 1991 * to the host and if so, we head out also. If no CPUs are heading 1992 * check return values <= 0. 1993 * 1994 * Return to guest (r3 <= 0) 1995 * 0 No external interrupt is pending 1996 * -1 A guest wakeup IPI (which has now been cleared) 1997 * In either case, we return to guest to deliver any pending 1998 * guest interrupts. 1999 * 2000 * -2 A PCI passthrough external interrupt was handled 2001 * (interrupt was delivered directly to guest) 2002 * Return to guest to deliver any pending guest interrupts. 2003 */ 2004 2005 cmpdi r3, 1 2006 ble 1f 2007 2008 /* Return code = 2 */ 2009 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 2010 stw r12, VCPU_TRAP(r9) 2011 b guest_exit_cont 2012 20131: /* Return code <= 1 */ 2014 cmpdi r3, 0 2015 bgt guest_exit_cont 2016 2017 /* Return code <= 0 */ 2018maybe_reenter_guest: 2019 ld r5, HSTATE_KVM_VCORE(r13) 2020 lwz r0, VCORE_ENTRY_EXIT(r5) 2021 cmpwi r0, 0x100 2022 mr r4, r9 2023 blt deliver_guest_interrupt 2024 b guest_exit_cont 2025 2026#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2027/* 2028 * Softpatch interrupt for transactional memory emulation cases 2029 * on POWER9 DD2.2. This is early in the guest exit path - we 2030 * haven't saved registers or done a treclaim yet. 2031 */ 2032kvmppc_tm_emul: 2033 /* Save instruction image in HEIR */ 2034 mfspr r3, SPRN_HEIR 2035 stw r3, VCPU_HEIR(r9) 2036 2037 /* 2038 * The cases we want to handle here are those where the guest 2039 * is in real suspend mode and is trying to transition to 2040 * transactional mode. 2041 */ 2042 lbz r0, HSTATE_FAKE_SUSPEND(r13) 2043 cmpwi r0, 0 /* keep exiting guest if in fake suspend */ 2044 bne guest_exit_cont 2045 rldicl r3, r11, 64 - MSR_TS_S_LG, 62 2046 cmpwi r3, 1 /* or if not in suspend state */ 2047 bne guest_exit_cont 2048 2049 /* Call C code to do the emulation */ 2050 mr r3, r9 2051 bl kvmhv_p9_tm_emulation_early 2052 nop 2053 ld r9, HSTATE_KVM_VCPU(r13) 2054 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 2055 cmpwi r3, 0 2056 beq guest_exit_cont /* continue exiting if not handled */ 2057 ld r10, VCPU_PC(r9) 2058 ld r11, VCPU_MSR(r9) 2059 b fast_interrupt_c_return /* go back to guest if handled */ 2060#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 2061 2062/* 2063 * Check whether an HDSI is an HPTE not found fault or something else. 2064 * If it is an HPTE not found fault that is due to the guest accessing 2065 * a page that they have mapped but which we have paged out, then 2066 * we continue on with the guest exit path. In all other cases, 2067 * reflect the HDSI to the guest as a DSI. 2068 */ 2069kvmppc_hdsi: 2070 ld r3, VCPU_KVM(r9) 2071 lbz r0, KVM_RADIX(r3) 2072 mfspr r4, SPRN_HDAR 2073 mfspr r6, SPRN_HDSISR 2074BEGIN_FTR_SECTION 2075 /* Look for DSISR canary. If we find it, retry instruction */ 2076 cmpdi r6, 0x7fff 2077 beq 6f 2078END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2079 cmpwi r0, 0 2080 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ 2081 /* HPTE not found fault or protection fault? */ 2082 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 2083 beq 1f /* if not, send it to the guest */ 2084 andi. r0, r11, MSR_DR /* data relocation enabled? */ 2085 beq 3f 2086BEGIN_FTR_SECTION 2087 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ 2088 b 4f 2089END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2090 clrrdi r0, r4, 28 2091 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 2092 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT 2093 bne 7f /* if no SLB entry found */ 20944: std r4, VCPU_FAULT_DAR(r9) 2095 stw r6, VCPU_FAULT_DSISR(r9) 2096 2097 /* Search the hash table. */ 2098 mr r3, r9 /* vcpu pointer */ 2099 li r7, 1 /* data fault */ 2100 bl kvmppc_hpte_hv_fault 2101 ld r9, HSTATE_KVM_VCPU(r13) 2102 ld r10, VCPU_PC(r9) 2103 ld r11, VCPU_MSR(r9) 2104 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 2105 cmpdi r3, 0 /* retry the instruction */ 2106 beq 6f 2107 cmpdi r3, -1 /* handle in kernel mode */ 2108 beq guest_exit_cont 2109 cmpdi r3, -2 /* MMIO emulation; need instr word */ 2110 beq 2f 2111 2112 /* Synthesize a DSI (or DSegI) for the guest */ 2113 ld r4, VCPU_FAULT_DAR(r9) 2114 mr r6, r3 21151: li r0, BOOK3S_INTERRUPT_DATA_STORAGE 2116 mtspr SPRN_DSISR, r6 21177: mtspr SPRN_DAR, r4 2118 mtspr SPRN_SRR0, r10 2119 mtspr SPRN_SRR1, r11 2120 mr r10, r0 2121 bl kvmppc_msr_interrupt 2122fast_interrupt_c_return: 21236: ld r7, VCPU_CTR(r9) 2124 ld r8, VCPU_XER(r9) 2125 mtctr r7 2126 mtxer r8 2127 mr r4, r9 2128 b fast_guest_return 2129 21303: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ 2131 ld r5, KVM_VRMA_SLB_V(r5) 2132 b 4b 2133 2134 /* If this is for emulated MMIO, load the instruction word */ 21352: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ 2136 2137 /* Set guest mode to 'jump over instruction' so if lwz faults 2138 * we'll just continue at the next IP. */ 2139 li r0, KVM_GUEST_MODE_SKIP 2140 stb r0, HSTATE_IN_GUEST(r13) 2141 2142 /* Do the access with MSR:DR enabled */ 2143 mfmsr r3 2144 ori r4, r3, MSR_DR /* Enable paging for data */ 2145 mtmsrd r4 2146 lwz r8, 0(r10) 2147 mtmsrd r3 2148 2149 /* Store the result */ 2150 stw r8, VCPU_LAST_INST(r9) 2151 2152 /* Unset guest mode. */ 2153 li r0, KVM_GUEST_MODE_HOST_HV 2154 stb r0, HSTATE_IN_GUEST(r13) 2155 b guest_exit_cont 2156 2157.Lradix_hdsi: 2158 std r4, VCPU_FAULT_DAR(r9) 2159 stw r6, VCPU_FAULT_DSISR(r9) 2160.Lradix_hisi: 2161 mfspr r5, SPRN_ASDR 2162 std r5, VCPU_FAULT_GPA(r9) 2163 b guest_exit_cont 2164 2165/* 2166 * Similarly for an HISI, reflect it to the guest as an ISI unless 2167 * it is an HPTE not found fault for a page that we have paged out. 2168 */ 2169kvmppc_hisi: 2170 ld r3, VCPU_KVM(r9) 2171 lbz r0, KVM_RADIX(r3) 2172 cmpwi r0, 0 2173 bne .Lradix_hisi /* for radix, just save ASDR */ 2174 andis. r0, r11, SRR1_ISI_NOPT@h 2175 beq 1f 2176 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 2177 beq 3f 2178BEGIN_FTR_SECTION 2179 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ 2180 b 4f 2181END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2182 clrrdi r0, r10, 28 2183 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 2184 li r0, BOOK3S_INTERRUPT_INST_SEGMENT 2185 bne 7f /* if no SLB entry found */ 21864: 2187 /* Search the hash table. */ 2188 mr r3, r9 /* vcpu pointer */ 2189 mr r4, r10 2190 mr r6, r11 2191 li r7, 0 /* instruction fault */ 2192 bl kvmppc_hpte_hv_fault 2193 ld r9, HSTATE_KVM_VCPU(r13) 2194 ld r10, VCPU_PC(r9) 2195 ld r11, VCPU_MSR(r9) 2196 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE 2197 cmpdi r3, 0 /* retry the instruction */ 2198 beq fast_interrupt_c_return 2199 cmpdi r3, -1 /* handle in kernel mode */ 2200 beq guest_exit_cont 2201 2202 /* Synthesize an ISI (or ISegI) for the guest */ 2203 mr r11, r3 22041: li r0, BOOK3S_INTERRUPT_INST_STORAGE 22057: mtspr SPRN_SRR0, r10 2206 mtspr SPRN_SRR1, r11 2207 mr r10, r0 2208 bl kvmppc_msr_interrupt 2209 b fast_interrupt_c_return 2210 22113: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 2212 ld r5, KVM_VRMA_SLB_V(r6) 2213 b 4b 2214 2215/* 2216 * Try to handle an hcall in real mode. 2217 * Returns to the guest if we handle it, or continues on up to 2218 * the kernel if we can't (i.e. if we don't have a handler for 2219 * it, or if the handler returns H_TOO_HARD). 2220 * 2221 * r5 - r8 contain hcall args, 2222 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca 2223 */ 2224hcall_try_real_mode: 2225 ld r3,VCPU_GPR(R3)(r9) 2226 andi. r0,r11,MSR_PR 2227 /* sc 1 from userspace - reflect to guest syscall */ 2228 bne sc_1_fast_return 2229 /* sc 1 from nested guest - give it to L1 to handle */ 2230 ld r0, VCPU_NESTED(r9) 2231 cmpdi r0, 0 2232 bne guest_exit_cont 2233 clrrdi r3,r3,2 2234 cmpldi r3,hcall_real_table_end - hcall_real_table 2235 bge guest_exit_cont 2236 /* See if this hcall is enabled for in-kernel handling */ 2237 ld r4, VCPU_KVM(r9) 2238 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ 2239 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ 2240 add r4, r4, r0 2241 ld r0, KVM_ENABLED_HCALLS(r4) 2242 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ 2243 srd r0, r0, r4 2244 andi. r0, r0, 1 2245 beq guest_exit_cont 2246 /* Get pointer to handler, if any, and call it */ 2247 LOAD_REG_ADDR(r4, hcall_real_table) 2248 lwax r3,r3,r4 2249 cmpwi r3,0 2250 beq guest_exit_cont 2251 add r12,r3,r4 2252 mtctr r12 2253 mr r3,r9 /* get vcpu pointer */ 2254 ld r4,VCPU_GPR(R4)(r9) 2255 bctrl 2256 cmpdi r3,H_TOO_HARD 2257 beq hcall_real_fallback 2258 ld r4,HSTATE_KVM_VCPU(r13) 2259 std r3,VCPU_GPR(R3)(r4) 2260 ld r10,VCPU_PC(r4) 2261 ld r11,VCPU_MSR(r4) 2262 b fast_guest_return 2263 2264sc_1_fast_return: 2265 mtspr SPRN_SRR0,r10 2266 mtspr SPRN_SRR1,r11 2267 li r10, BOOK3S_INTERRUPT_SYSCALL 2268 bl kvmppc_msr_interrupt 2269 mr r4,r9 2270 b fast_guest_return 2271 2272 /* We've attempted a real mode hcall, but it's punted it back 2273 * to userspace. We need to restore some clobbered volatiles 2274 * before resuming the pass-it-to-qemu path */ 2275hcall_real_fallback: 2276 li r12,BOOK3S_INTERRUPT_SYSCALL 2277 ld r9, HSTATE_KVM_VCPU(r13) 2278 2279 b guest_exit_cont 2280 2281 .globl hcall_real_table 2282hcall_real_table: 2283 .long 0 /* 0 - unused */ 2284 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 2285 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 2286 .long DOTSYM(kvmppc_h_read) - hcall_real_table 2287 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 2288 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 2289 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 2290#ifdef CONFIG_SPAPR_TCE_IOMMU 2291 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 2292 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table 2293#else 2294 .long 0 /* 0x1c */ 2295 .long 0 /* 0x20 */ 2296#endif 2297 .long 0 /* 0x24 - H_SET_SPRG0 */ 2298 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 2299 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 2300 .long 0 /* 0x30 */ 2301 .long 0 /* 0x34 */ 2302 .long 0 /* 0x38 */ 2303 .long 0 /* 0x3c */ 2304 .long 0 /* 0x40 */ 2305 .long 0 /* 0x44 */ 2306 .long 0 /* 0x48 */ 2307 .long 0 /* 0x4c */ 2308 .long 0 /* 0x50 */ 2309 .long 0 /* 0x54 */ 2310 .long 0 /* 0x58 */ 2311 .long 0 /* 0x5c */ 2312 .long 0 /* 0x60 */ 2313#ifdef CONFIG_KVM_XICS 2314 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 2315 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 2316 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 2317 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table 2318 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 2319#else 2320 .long 0 /* 0x64 - H_EOI */ 2321 .long 0 /* 0x68 - H_CPPR */ 2322 .long 0 /* 0x6c - H_IPI */ 2323 .long 0 /* 0x70 - H_IPOLL */ 2324 .long 0 /* 0x74 - H_XIRR */ 2325#endif 2326 .long 0 /* 0x78 */ 2327 .long 0 /* 0x7c */ 2328 .long 0 /* 0x80 */ 2329 .long 0 /* 0x84 */ 2330 .long 0 /* 0x88 */ 2331 .long 0 /* 0x8c */ 2332 .long 0 /* 0x90 */ 2333 .long 0 /* 0x94 */ 2334 .long 0 /* 0x98 */ 2335 .long 0 /* 0x9c */ 2336 .long 0 /* 0xa0 */ 2337 .long 0 /* 0xa4 */ 2338 .long 0 /* 0xa8 */ 2339 .long 0 /* 0xac */ 2340 .long 0 /* 0xb0 */ 2341 .long 0 /* 0xb4 */ 2342 .long 0 /* 0xb8 */ 2343 .long 0 /* 0xbc */ 2344 .long 0 /* 0xc0 */ 2345 .long 0 /* 0xc4 */ 2346 .long 0 /* 0xc8 */ 2347 .long 0 /* 0xcc */ 2348 .long 0 /* 0xd0 */ 2349 .long 0 /* 0xd4 */ 2350 .long 0 /* 0xd8 */ 2351 .long 0 /* 0xdc */ 2352 .long DOTSYM(kvmppc_h_cede) - hcall_real_table 2353 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 2354 .long 0 /* 0xe8 */ 2355 .long 0 /* 0xec */ 2356 .long 0 /* 0xf0 */ 2357 .long 0 /* 0xf4 */ 2358 .long 0 /* 0xf8 */ 2359 .long 0 /* 0xfc */ 2360 .long 0 /* 0x100 */ 2361 .long 0 /* 0x104 */ 2362 .long 0 /* 0x108 */ 2363 .long 0 /* 0x10c */ 2364 .long 0 /* 0x110 */ 2365 .long 0 /* 0x114 */ 2366 .long 0 /* 0x118 */ 2367 .long 0 /* 0x11c */ 2368 .long 0 /* 0x120 */ 2369 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 2370 .long 0 /* 0x128 */ 2371 .long 0 /* 0x12c */ 2372 .long 0 /* 0x130 */ 2373 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2374#ifdef CONFIG_SPAPR_TCE_IOMMU 2375 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table 2376 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table 2377#else 2378 .long 0 /* 0x138 */ 2379 .long 0 /* 0x13c */ 2380#endif 2381 .long 0 /* 0x140 */ 2382 .long 0 /* 0x144 */ 2383 .long 0 /* 0x148 */ 2384 .long 0 /* 0x14c */ 2385 .long 0 /* 0x150 */ 2386 .long 0 /* 0x154 */ 2387 .long 0 /* 0x158 */ 2388 .long 0 /* 0x15c */ 2389 .long 0 /* 0x160 */ 2390 .long 0 /* 0x164 */ 2391 .long 0 /* 0x168 */ 2392 .long 0 /* 0x16c */ 2393 .long 0 /* 0x170 */ 2394 .long 0 /* 0x174 */ 2395 .long 0 /* 0x178 */ 2396 .long 0 /* 0x17c */ 2397 .long 0 /* 0x180 */ 2398 .long 0 /* 0x184 */ 2399 .long 0 /* 0x188 */ 2400 .long 0 /* 0x18c */ 2401 .long 0 /* 0x190 */ 2402 .long 0 /* 0x194 */ 2403 .long 0 /* 0x198 */ 2404 .long 0 /* 0x19c */ 2405 .long 0 /* 0x1a0 */ 2406 .long 0 /* 0x1a4 */ 2407 .long 0 /* 0x1a8 */ 2408 .long 0 /* 0x1ac */ 2409 .long 0 /* 0x1b0 */ 2410 .long 0 /* 0x1b4 */ 2411 .long 0 /* 0x1b8 */ 2412 .long 0 /* 0x1bc */ 2413 .long 0 /* 0x1c0 */ 2414 .long 0 /* 0x1c4 */ 2415 .long 0 /* 0x1c8 */ 2416 .long 0 /* 0x1cc */ 2417 .long 0 /* 0x1d0 */ 2418 .long 0 /* 0x1d4 */ 2419 .long 0 /* 0x1d8 */ 2420 .long 0 /* 0x1dc */ 2421 .long 0 /* 0x1e0 */ 2422 .long 0 /* 0x1e4 */ 2423 .long 0 /* 0x1e8 */ 2424 .long 0 /* 0x1ec */ 2425 .long 0 /* 0x1f0 */ 2426 .long 0 /* 0x1f4 */ 2427 .long 0 /* 0x1f8 */ 2428 .long 0 /* 0x1fc */ 2429 .long 0 /* 0x200 */ 2430 .long 0 /* 0x204 */ 2431 .long 0 /* 0x208 */ 2432 .long 0 /* 0x20c */ 2433 .long 0 /* 0x210 */ 2434 .long 0 /* 0x214 */ 2435 .long 0 /* 0x218 */ 2436 .long 0 /* 0x21c */ 2437 .long 0 /* 0x220 */ 2438 .long 0 /* 0x224 */ 2439 .long 0 /* 0x228 */ 2440 .long 0 /* 0x22c */ 2441 .long 0 /* 0x230 */ 2442 .long 0 /* 0x234 */ 2443 .long 0 /* 0x238 */ 2444 .long 0 /* 0x23c */ 2445 .long 0 /* 0x240 */ 2446 .long 0 /* 0x244 */ 2447 .long 0 /* 0x248 */ 2448 .long 0 /* 0x24c */ 2449 .long 0 /* 0x250 */ 2450 .long 0 /* 0x254 */ 2451 .long 0 /* 0x258 */ 2452 .long 0 /* 0x25c */ 2453 .long 0 /* 0x260 */ 2454 .long 0 /* 0x264 */ 2455 .long 0 /* 0x268 */ 2456 .long 0 /* 0x26c */ 2457 .long 0 /* 0x270 */ 2458 .long 0 /* 0x274 */ 2459 .long 0 /* 0x278 */ 2460 .long 0 /* 0x27c */ 2461 .long 0 /* 0x280 */ 2462 .long 0 /* 0x284 */ 2463 .long 0 /* 0x288 */ 2464 .long 0 /* 0x28c */ 2465 .long 0 /* 0x290 */ 2466 .long 0 /* 0x294 */ 2467 .long 0 /* 0x298 */ 2468 .long 0 /* 0x29c */ 2469 .long 0 /* 0x2a0 */ 2470 .long 0 /* 0x2a4 */ 2471 .long 0 /* 0x2a8 */ 2472 .long 0 /* 0x2ac */ 2473 .long 0 /* 0x2b0 */ 2474 .long 0 /* 0x2b4 */ 2475 .long 0 /* 0x2b8 */ 2476 .long 0 /* 0x2bc */ 2477 .long 0 /* 0x2c0 */ 2478 .long 0 /* 0x2c4 */ 2479 .long 0 /* 0x2c8 */ 2480 .long 0 /* 0x2cc */ 2481 .long 0 /* 0x2d0 */ 2482 .long 0 /* 0x2d4 */ 2483 .long 0 /* 0x2d8 */ 2484 .long 0 /* 0x2dc */ 2485 .long 0 /* 0x2e0 */ 2486 .long 0 /* 0x2e4 */ 2487 .long 0 /* 0x2e8 */ 2488 .long 0 /* 0x2ec */ 2489 .long 0 /* 0x2f0 */ 2490 .long 0 /* 0x2f4 */ 2491 .long 0 /* 0x2f8 */ 2492#ifdef CONFIG_KVM_XICS 2493 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table 2494#else 2495 .long 0 /* 0x2fc - H_XIRR_X*/ 2496#endif 2497 .long DOTSYM(kvmppc_h_random) - hcall_real_table 2498 .globl hcall_real_table_end 2499hcall_real_table_end: 2500 2501_GLOBAL(kvmppc_h_set_xdabr) 2502EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 2503 andi. r0, r5, DABRX_USER | DABRX_KERNEL 2504 beq 6f 2505 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 2506 andc. r0, r5, r0 2507 beq 3f 25086: li r3, H_PARAMETER 2509 blr 2510 2511_GLOBAL(kvmppc_h_set_dabr) 2512EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) 2513 li r5, DABRX_USER | DABRX_KERNEL 25143: 2515BEGIN_FTR_SECTION 2516 b 2f 2517END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2518 std r4,VCPU_DABR(r3) 2519 stw r5, VCPU_DABRX(r3) 2520 mtspr SPRN_DABRX, r5 2521 /* Work around P7 bug where DABR can get corrupted on mtspr */ 25221: mtspr SPRN_DABR,r4 2523 mfspr r5, SPRN_DABR 2524 cmpd r4, r5 2525 bne 1b 2526 isync 2527 li r3,0 2528 blr 2529 25302: 2531 LOAD_REG_ADDR(r11, dawr_force_enable) 2532 lbz r11, 0(r11) 2533 cmpdi r11, 0 2534 bne 3f 2535 li r3, H_HARDWARE 2536 blr 25373: 2538 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 2539 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW 2540 rlwimi r5, r4, 2, DAWRX_WT 2541 clrrdi r4, r4, 3 2542 std r4, VCPU_DAWR(r3) 2543 std r5, VCPU_DAWRX(r3) 2544 /* 2545 * If came in through the real mode hcall handler then it is necessary 2546 * to write the registers since the return path won't. Otherwise it is 2547 * sufficient to store then in the vcpu struct as they will be loaded 2548 * next time the vcpu is run. 2549 */ 2550 mfmsr r6 2551 andi. r6, r6, MSR_DR /* in real mode? */ 2552 bne 4f 2553 mtspr SPRN_DAWR, r4 2554 mtspr SPRN_DAWRX, r5 25554: li r3, 0 2556 blr 2557 2558_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ 2559 ori r11,r11,MSR_EE 2560 std r11,VCPU_MSR(r3) 2561 li r0,1 2562 stb r0,VCPU_CEDED(r3) 2563 sync /* order setting ceded vs. testing prodded */ 2564 lbz r5,VCPU_PRODDED(r3) 2565 cmpwi r5,0 2566 bne kvm_cede_prodded 2567 li r12,0 /* set trap to 0 to say hcall is handled */ 2568 stw r12,VCPU_TRAP(r3) 2569 li r0,H_SUCCESS 2570 std r0,VCPU_GPR(R3)(r3) 2571 2572 /* 2573 * Set our bit in the bitmask of napping threads unless all the 2574 * other threads are already napping, in which case we send this 2575 * up to the host. 2576 */ 2577 ld r5,HSTATE_KVM_VCORE(r13) 2578 lbz r6,HSTATE_PTID(r13) 2579 lwz r8,VCORE_ENTRY_EXIT(r5) 2580 clrldi r8,r8,56 2581 li r0,1 2582 sld r0,r0,r6 2583 addi r6,r5,VCORE_NAPPING_THREADS 258431: lwarx r4,0,r6 2585 or r4,r4,r0 2586 cmpw r4,r8 2587 beq kvm_cede_exit 2588 stwcx. r4,0,r6 2589 bne 31b 2590 /* order napping_threads update vs testing entry_exit_map */ 2591 isync 2592 li r0,NAPPING_CEDE 2593 stb r0,HSTATE_NAPPING(r13) 2594 lwz r7,VCORE_ENTRY_EXIT(r5) 2595 cmpwi r7,0x100 2596 bge 33f /* another thread already exiting */ 2597 2598/* 2599 * Although not specifically required by the architecture, POWER7 2600 * preserves the following registers in nap mode, even if an SMT mode 2601 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, 2602 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. 2603 */ 2604 /* Save non-volatile GPRs */ 2605 std r14, VCPU_GPR(R14)(r3) 2606 std r15, VCPU_GPR(R15)(r3) 2607 std r16, VCPU_GPR(R16)(r3) 2608 std r17, VCPU_GPR(R17)(r3) 2609 std r18, VCPU_GPR(R18)(r3) 2610 std r19, VCPU_GPR(R19)(r3) 2611 std r20, VCPU_GPR(R20)(r3) 2612 std r21, VCPU_GPR(R21)(r3) 2613 std r22, VCPU_GPR(R22)(r3) 2614 std r23, VCPU_GPR(R23)(r3) 2615 std r24, VCPU_GPR(R24)(r3) 2616 std r25, VCPU_GPR(R25)(r3) 2617 std r26, VCPU_GPR(R26)(r3) 2618 std r27, VCPU_GPR(R27)(r3) 2619 std r28, VCPU_GPR(R28)(r3) 2620 std r29, VCPU_GPR(R29)(r3) 2621 std r30, VCPU_GPR(R30)(r3) 2622 std r31, VCPU_GPR(R31)(r3) 2623 2624 /* save FP state */ 2625 bl kvmppc_save_fp 2626 2627#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2628/* 2629 * Branch around the call if both CPU_FTR_TM and 2630 * CPU_FTR_P9_TM_HV_ASSIST are off. 2631 */ 2632BEGIN_FTR_SECTION 2633 b 91f 2634END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 2635 /* 2636 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2637 */ 2638 ld r3, HSTATE_KVM_VCPU(r13) 2639 ld r4, VCPU_MSR(r3) 2640 li r5, 0 /* don't preserve non-vol regs */ 2641 bl kvmppc_save_tm_hv 2642 nop 264391: 2644#endif 2645 2646 /* 2647 * Set DEC to the smaller of DEC and HDEC, so that we wake 2648 * no later than the end of our timeslice (HDEC interrupts 2649 * don't wake us from nap). 2650 */ 2651 mfspr r3, SPRN_DEC 2652 mfspr r4, SPRN_HDEC 2653 mftb r5 2654BEGIN_FTR_SECTION 2655 /* On P9 check whether the guest has large decrementer mode enabled */ 2656 ld r6, HSTATE_KVM_VCORE(r13) 2657 ld r6, VCORE_LPCR(r6) 2658 andis. r6, r6, LPCR_LD@h 2659 bne 68f 2660END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2661 extsw r3, r3 266268: EXTEND_HDEC(r4) 2663 cmpd r3, r4 2664 ble 67f 2665 mtspr SPRN_DEC, r4 266667: 2667 /* save expiry time of guest decrementer */ 2668 add r3, r3, r5 2669 ld r4, HSTATE_KVM_VCPU(r13) 2670 ld r5, HSTATE_KVM_VCORE(r13) 2671 ld r6, VCORE_TB_OFFSET_APPL(r5) 2672 subf r3, r6, r3 /* convert to host TB value */ 2673 std r3, VCPU_DEC_EXPIRES(r4) 2674 2675#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2676 ld r4, HSTATE_KVM_VCPU(r13) 2677 addi r3, r4, VCPU_TB_CEDE 2678 bl kvmhv_accumulate_time 2679#endif 2680 2681 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ 2682 2683 /* Go back to host stack */ 2684 ld r1, HSTATE_HOST_R1(r13) 2685 2686 /* 2687 * Take a nap until a decrementer or external or doobell interrupt 2688 * occurs, with PECE1 and PECE0 set in LPCR. 2689 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. 2690 * Also clear the runlatch bit before napping. 2691 */ 2692kvm_do_nap: 2693 mfspr r0, SPRN_CTRLF 2694 clrrdi r0, r0, 1 2695 mtspr SPRN_CTRLT, r0 2696 2697 li r0,1 2698 stb r0,HSTATE_HWTHREAD_REQ(r13) 2699 mfspr r5,SPRN_LPCR 2700 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 2701BEGIN_FTR_SECTION 2702 ori r5, r5, LPCR_PECEDH 2703 rlwimi r5, r3, 0, LPCR_PECEDP 2704END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2705 2706kvm_nap_sequence: /* desired LPCR value in r5 */ 2707BEGIN_FTR_SECTION 2708 /* 2709 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) 2710 * enable state loss = 1 (allow SMT mode switch) 2711 * requested level = 0 (just stop dispatching) 2712 */ 2713 lis r3, (PSSCR_EC | PSSCR_ESL)@h 2714 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ 2715 li r4, LPCR_PECE_HVEE@higher 2716 sldi r4, r4, 32 2717 or r5, r5, r4 2718FTR_SECTION_ELSE 2719 li r3, PNV_THREAD_NAP 2720ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2721 mtspr SPRN_LPCR,r5 2722 isync 2723 2724BEGIN_FTR_SECTION 2725 bl isa300_idle_stop_mayloss 2726FTR_SECTION_ELSE 2727 bl isa206_idle_insn_mayloss 2728ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2729 2730 mfspr r0, SPRN_CTRLF 2731 ori r0, r0, 1 2732 mtspr SPRN_CTRLT, r0 2733 2734 mtspr SPRN_SRR1, r3 2735 2736 li r0, 0 2737 stb r0, PACA_FTRACE_ENABLED(r13) 2738 2739 li r0, KVM_HWTHREAD_IN_KVM 2740 stb r0, HSTATE_HWTHREAD_STATE(r13) 2741 2742 lbz r0, HSTATE_NAPPING(r13) 2743 cmpwi r0, NAPPING_CEDE 2744 beq kvm_end_cede 2745 cmpwi r0, NAPPING_NOVCPU 2746 beq kvm_novcpu_wakeup 2747 cmpwi r0, NAPPING_UNSPLIT 2748 beq kvm_unsplit_wakeup 2749 twi 31,0,0 /* Nap state must not be zero */ 2750 275133: mr r4, r3 2752 li r3, 0 2753 li r12, 0 2754 b 34f 2755 2756kvm_end_cede: 2757 /* Woken by external or decrementer interrupt */ 2758 2759 /* get vcpu pointer */ 2760 ld r4, HSTATE_KVM_VCPU(r13) 2761 2762#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2763 addi r3, r4, VCPU_TB_RMINTR 2764 bl kvmhv_accumulate_time 2765#endif 2766 2767#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2768/* 2769 * Branch around the call if both CPU_FTR_TM and 2770 * CPU_FTR_P9_TM_HV_ASSIST are off. 2771 */ 2772BEGIN_FTR_SECTION 2773 b 91f 2774END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 2775 /* 2776 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2777 */ 2778 mr r3, r4 2779 ld r4, VCPU_MSR(r3) 2780 li r5, 0 /* don't preserve non-vol regs */ 2781 bl kvmppc_restore_tm_hv 2782 nop 2783 ld r4, HSTATE_KVM_VCPU(r13) 278491: 2785#endif 2786 2787 /* load up FP state */ 2788 bl kvmppc_load_fp 2789 2790 /* Restore guest decrementer */ 2791 ld r3, VCPU_DEC_EXPIRES(r4) 2792 ld r5, HSTATE_KVM_VCORE(r13) 2793 ld r6, VCORE_TB_OFFSET_APPL(r5) 2794 add r3, r3, r6 /* convert host TB to guest TB value */ 2795 mftb r7 2796 subf r3, r7, r3 2797 mtspr SPRN_DEC, r3 2798 2799 /* Load NV GPRS */ 2800 ld r14, VCPU_GPR(R14)(r4) 2801 ld r15, VCPU_GPR(R15)(r4) 2802 ld r16, VCPU_GPR(R16)(r4) 2803 ld r17, VCPU_GPR(R17)(r4) 2804 ld r18, VCPU_GPR(R18)(r4) 2805 ld r19, VCPU_GPR(R19)(r4) 2806 ld r20, VCPU_GPR(R20)(r4) 2807 ld r21, VCPU_GPR(R21)(r4) 2808 ld r22, VCPU_GPR(R22)(r4) 2809 ld r23, VCPU_GPR(R23)(r4) 2810 ld r24, VCPU_GPR(R24)(r4) 2811 ld r25, VCPU_GPR(R25)(r4) 2812 ld r26, VCPU_GPR(R26)(r4) 2813 ld r27, VCPU_GPR(R27)(r4) 2814 ld r28, VCPU_GPR(R28)(r4) 2815 ld r29, VCPU_GPR(R29)(r4) 2816 ld r30, VCPU_GPR(R30)(r4) 2817 ld r31, VCPU_GPR(R31)(r4) 2818 2819 /* Check the wake reason in SRR1 to see why we got here */ 2820 bl kvmppc_check_wake_reason 2821 2822 /* 2823 * Restore volatile registers since we could have called a 2824 * C routine in kvmppc_check_wake_reason 2825 * r4 = VCPU 2826 * r3 tells us whether we need to return to host or not 2827 * WARNING: it gets checked further down: 2828 * should not modify r3 until this check is done. 2829 */ 2830 ld r4, HSTATE_KVM_VCPU(r13) 2831 2832 /* clear our bit in vcore->napping_threads */ 283334: ld r5,HSTATE_KVM_VCORE(r13) 2834 lbz r7,HSTATE_PTID(r13) 2835 li r0,1 2836 sld r0,r0,r7 2837 addi r6,r5,VCORE_NAPPING_THREADS 283832: lwarx r7,0,r6 2839 andc r7,r7,r0 2840 stwcx. r7,0,r6 2841 bne 32b 2842 li r0,0 2843 stb r0,HSTATE_NAPPING(r13) 2844 2845 /* See if the wake reason saved in r3 means we need to exit */ 2846 stw r12, VCPU_TRAP(r4) 2847 mr r9, r4 2848 cmpdi r3, 0 2849 bgt guest_exit_cont 2850 b maybe_reenter_guest 2851 2852 /* cede when already previously prodded case */ 2853kvm_cede_prodded: 2854 li r0,0 2855 stb r0,VCPU_PRODDED(r3) 2856 sync /* order testing prodded vs. clearing ceded */ 2857 stb r0,VCPU_CEDED(r3) 2858 li r3,H_SUCCESS 2859 blr 2860 2861 /* we've ceded but we want to give control to the host */ 2862kvm_cede_exit: 2863 ld r9, HSTATE_KVM_VCPU(r13) 2864#ifdef CONFIG_KVM_XICS 2865 /* are we using XIVE with single escalation? */ 2866 ld r10, VCPU_XIVE_ESC_VADDR(r9) 2867 cmpdi r10, 0 2868 beq 3f 2869 li r6, XIVE_ESB_SET_PQ_00 2870 /* 2871 * If we still have a pending escalation, abort the cede, 2872 * and we must set PQ to 10 rather than 00 so that we don't 2873 * potentially end up with two entries for the escalation 2874 * interrupt in the XIVE interrupt queue. In that case 2875 * we also don't want to set xive_esc_on to 1 here in 2876 * case we race with xive_esc_irq(). 2877 */ 2878 lbz r5, VCPU_XIVE_ESC_ON(r9) 2879 cmpwi r5, 0 2880 beq 4f 2881 li r0, 0 2882 stb r0, VCPU_CEDED(r9) 2883 li r6, XIVE_ESB_SET_PQ_10 2884 b 5f 28854: li r0, 1 2886 stb r0, VCPU_XIVE_ESC_ON(r9) 2887 /* make sure store to xive_esc_on is seen before xive_esc_irq runs */ 2888 sync 28895: /* Enable XIVE escalation */ 2890 mfmsr r0 2891 andi. r0, r0, MSR_DR /* in real mode? */ 2892 beq 1f 2893 ldx r0, r10, r6 2894 b 2f 28951: ld r10, VCPU_XIVE_ESC_RADDR(r9) 2896 ldcix r0, r10, r6 28972: sync 2898#endif /* CONFIG_KVM_XICS */ 28993: b guest_exit_cont 2900 2901 /* Try to do machine check recovery in real mode */ 2902machine_check_realmode: 2903 mr r3, r9 /* get vcpu pointer */ 2904 bl kvmppc_realmode_machine_check 2905 nop 2906 /* all machine checks go to virtual mode for further handling */ 2907 ld r9, HSTATE_KVM_VCPU(r13) 2908 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2909 b guest_exit_cont 2910 2911/* 2912 * Call C code to handle a HMI in real mode. 2913 * Only the primary thread does the call, secondary threads are handled 2914 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 2915 * r9 points to the vcpu on entry 2916 */ 2917hmi_realmode: 2918 lbz r0, HSTATE_PTID(r13) 2919 cmpwi r0, 0 2920 bne guest_exit_cont 2921 bl kvmppc_realmode_hmi_handler 2922 ld r9, HSTATE_KVM_VCPU(r13) 2923 li r12, BOOK3S_INTERRUPT_HMI 2924 b guest_exit_cont 2925 2926/* 2927 * Check the reason we woke from nap, and take appropriate action. 2928 * Returns (in r3): 2929 * 0 if nothing needs to be done 2930 * 1 if something happened that needs to be handled by the host 2931 * -1 if there was a guest wakeup (IPI or msgsnd) 2932 * -2 if we handled a PCI passthrough interrupt (returned by 2933 * kvmppc_read_intr only) 2934 * 2935 * Also sets r12 to the interrupt vector for any interrupt that needs 2936 * to be handled now by the host (0x500 for external interrupt), or zero. 2937 * Modifies all volatile registers (since it may call a C function). 2938 * This routine calls kvmppc_read_intr, a C function, if an external 2939 * interrupt is pending. 2940 */ 2941kvmppc_check_wake_reason: 2942 mfspr r6, SPRN_SRR1 2943BEGIN_FTR_SECTION 2944 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2945FTR_SECTION_ELSE 2946 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2947ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2948 cmpwi r6, 8 /* was it an external interrupt? */ 2949 beq 7f /* if so, see what it was */ 2950 li r3, 0 2951 li r12, 0 2952 cmpwi r6, 6 /* was it the decrementer? */ 2953 beq 0f 2954BEGIN_FTR_SECTION 2955 cmpwi r6, 5 /* privileged doorbell? */ 2956 beq 0f 2957 cmpwi r6, 3 /* hypervisor doorbell? */ 2958 beq 3f 2959END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2960 cmpwi r6, 0xa /* Hypervisor maintenance ? */ 2961 beq 4f 2962 li r3, 1 /* anything else, return 1 */ 29630: blr 2964 2965 /* hypervisor doorbell */ 29663: li r12, BOOK3S_INTERRUPT_H_DOORBELL 2967 2968 /* 2969 * Clear the doorbell as we will invoke the handler 2970 * explicitly in the guest exit path. 2971 */ 2972 lis r6, (PPC_DBELL_SERVER << (63-36))@h 2973 PPC_MSGCLR(6) 2974 /* see if it's a host IPI */ 2975 li r3, 1 2976BEGIN_FTR_SECTION 2977 PPC_MSGSYNC 2978 lwsync 2979END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2980 lbz r0, HSTATE_HOST_IPI(r13) 2981 cmpwi r0, 0 2982 bnelr 2983 /* if not, return -1 */ 2984 li r3, -1 2985 blr 2986 2987 /* Woken up due to Hypervisor maintenance interrupt */ 29884: li r12, BOOK3S_INTERRUPT_HMI 2989 li r3, 1 2990 blr 2991 2992 /* external interrupt - create a stack frame so we can call C */ 29937: mflr r0 2994 std r0, PPC_LR_STKOFF(r1) 2995 stdu r1, -PPC_MIN_STKFRM(r1) 2996 bl kvmppc_read_intr 2997 nop 2998 li r12, BOOK3S_INTERRUPT_EXTERNAL 2999 cmpdi r3, 1 3000 ble 1f 3001 3002 /* 3003 * Return code of 2 means PCI passthrough interrupt, but 3004 * we need to return back to host to complete handling the 3005 * interrupt. Trap reason is expected in r12 by guest 3006 * exit code. 3007 */ 3008 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 30091: 3010 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 3011 addi r1, r1, PPC_MIN_STKFRM 3012 mtlr r0 3013 blr 3014 3015/* 3016 * Save away FP, VMX and VSX registers. 3017 * r3 = vcpu pointer 3018 * N.B. r30 and r31 are volatile across this function, 3019 * thus it is not callable from C. 3020 */ 3021kvmppc_save_fp: 3022 mflr r30 3023 mr r31,r3 3024 mfmsr r5 3025 ori r8,r5,MSR_FP 3026#ifdef CONFIG_ALTIVEC 3027BEGIN_FTR_SECTION 3028 oris r8,r8,MSR_VEC@h 3029END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3030#endif 3031#ifdef CONFIG_VSX 3032BEGIN_FTR_SECTION 3033 oris r8,r8,MSR_VSX@h 3034END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3035#endif 3036 mtmsrd r8 3037 addi r3,r3,VCPU_FPRS 3038 bl store_fp_state 3039#ifdef CONFIG_ALTIVEC 3040BEGIN_FTR_SECTION 3041 addi r3,r31,VCPU_VRS 3042 bl store_vr_state 3043END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3044#endif 3045 mfspr r6,SPRN_VRSAVE 3046 stw r6,VCPU_VRSAVE(r31) 3047 mtlr r30 3048 blr 3049 3050/* 3051 * Load up FP, VMX and VSX registers 3052 * r4 = vcpu pointer 3053 * N.B. r30 and r31 are volatile across this function, 3054 * thus it is not callable from C. 3055 */ 3056kvmppc_load_fp: 3057 mflr r30 3058 mr r31,r4 3059 mfmsr r9 3060 ori r8,r9,MSR_FP 3061#ifdef CONFIG_ALTIVEC 3062BEGIN_FTR_SECTION 3063 oris r8,r8,MSR_VEC@h 3064END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3065#endif 3066#ifdef CONFIG_VSX 3067BEGIN_FTR_SECTION 3068 oris r8,r8,MSR_VSX@h 3069END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3070#endif 3071 mtmsrd r8 3072 addi r3,r4,VCPU_FPRS 3073 bl load_fp_state 3074#ifdef CONFIG_ALTIVEC 3075BEGIN_FTR_SECTION 3076 addi r3,r31,VCPU_VRS 3077 bl load_vr_state 3078END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3079#endif 3080 lwz r7,VCPU_VRSAVE(r31) 3081 mtspr SPRN_VRSAVE,r7 3082 mtlr r30 3083 mr r4,r31 3084 blr 3085 3086#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 3087/* 3088 * Save transactional state and TM-related registers. 3089 * Called with r3 pointing to the vcpu struct and r4 containing 3090 * the guest MSR value. 3091 * r5 is non-zero iff non-volatile register state needs to be maintained. 3092 * If r5 == 0, this can modify all checkpointed registers, but 3093 * restores r1 and r2 before exit. 3094 */ 3095_GLOBAL_TOC(kvmppc_save_tm_hv) 3096EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) 3097 /* See if we need to handle fake suspend mode */ 3098BEGIN_FTR_SECTION 3099 b __kvmppc_save_tm 3100END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3101 3102 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ 3103 cmpwi r0, 0 3104 beq __kvmppc_save_tm 3105 3106 /* The following code handles the fake_suspend = 1 case */ 3107 mflr r0 3108 std r0, PPC_LR_STKOFF(r1) 3109 stdu r1, -PPC_MIN_STKFRM(r1) 3110 3111 /* Turn on TM. */ 3112 mfmsr r8 3113 li r0, 1 3114 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 3115 mtmsrd r8 3116 3117 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ 3118 beq 4f 3119BEGIN_FTR_SECTION 3120 bl pnv_power9_force_smt4_catch 3121END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3122 nop 3123 3124 /* We have to treclaim here because that's the only way to do S->N */ 3125 li r3, TM_CAUSE_KVM_RESCHED 3126 TRECLAIM(R3) 3127 3128 /* 3129 * We were in fake suspend, so we are not going to save the 3130 * register state as the guest checkpointed state (since 3131 * we already have it), therefore we can now use any volatile GPR. 3132 * In fact treclaim in fake suspend state doesn't modify 3133 * any registers. 3134 */ 3135 3136BEGIN_FTR_SECTION 3137 bl pnv_power9_force_smt4_release 3138END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3139 nop 3140 31414: 3142 mfspr r3, SPRN_PSSCR 3143 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ 3144 li r0, PSSCR_FAKE_SUSPEND 3145 andc r3, r3, r0 3146 mtspr SPRN_PSSCR, r3 3147 3148 /* Don't save TEXASR, use value from last exit in real suspend state */ 3149 ld r9, HSTATE_KVM_VCPU(r13) 3150 mfspr r5, SPRN_TFHAR 3151 mfspr r6, SPRN_TFIAR 3152 std r5, VCPU_TFHAR(r9) 3153 std r6, VCPU_TFIAR(r9) 3154 3155 addi r1, r1, PPC_MIN_STKFRM 3156 ld r0, PPC_LR_STKOFF(r1) 3157 mtlr r0 3158 blr 3159 3160/* 3161 * Restore transactional state and TM-related registers. 3162 * Called with r3 pointing to the vcpu struct 3163 * and r4 containing the guest MSR value. 3164 * r5 is non-zero iff non-volatile register state needs to be maintained. 3165 * This potentially modifies all checkpointed registers. 3166 * It restores r1 and r2 from the PACA. 3167 */ 3168_GLOBAL_TOC(kvmppc_restore_tm_hv) 3169EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) 3170 /* 3171 * If we are doing TM emulation for the guest on a POWER9 DD2, 3172 * then we don't actually do a trechkpt -- we either set up 3173 * fake-suspend mode, or emulate a TM rollback. 3174 */ 3175BEGIN_FTR_SECTION 3176 b __kvmppc_restore_tm 3177END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3178 mflr r0 3179 std r0, PPC_LR_STKOFF(r1) 3180 3181 li r0, 0 3182 stb r0, HSTATE_FAKE_SUSPEND(r13) 3183 3184 /* Turn on TM so we can restore TM SPRs */ 3185 mfmsr r5 3186 li r0, 1 3187 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG 3188 mtmsrd r5 3189 3190 /* 3191 * The user may change these outside of a transaction, so they must 3192 * always be context switched. 3193 */ 3194 ld r5, VCPU_TFHAR(r3) 3195 ld r6, VCPU_TFIAR(r3) 3196 ld r7, VCPU_TEXASR(r3) 3197 mtspr SPRN_TFHAR, r5 3198 mtspr SPRN_TFIAR, r6 3199 mtspr SPRN_TEXASR, r7 3200 3201 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 3202 beqlr /* TM not active in guest */ 3203 3204 /* Make sure the failure summary is set */ 3205 oris r7, r7, (TEXASR_FS)@h 3206 mtspr SPRN_TEXASR, r7 3207 3208 cmpwi r5, 1 /* check for suspended state */ 3209 bgt 10f 3210 stb r5, HSTATE_FAKE_SUSPEND(r13) 3211 b 9f /* and return */ 321210: stdu r1, -PPC_MIN_STKFRM(r1) 3213 /* guest is in transactional state, so simulate rollback */ 3214 bl kvmhv_emulate_tm_rollback 3215 nop 3216 addi r1, r1, PPC_MIN_STKFRM 32179: ld r0, PPC_LR_STKOFF(r1) 3218 mtlr r0 3219 blr 3220#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 3221 3222/* 3223 * We come here if we get any exception or interrupt while we are 3224 * executing host real mode code while in guest MMU context. 3225 * r12 is (CR << 32) | vector 3226 * r13 points to our PACA 3227 * r12 is saved in HSTATE_SCRATCH0(r13) 3228 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE 3229 * r9 is saved in HSTATE_SCRATCH2(r13) 3230 * r13 is saved in HSPRG1 3231 * cfar is saved in HSTATE_CFAR(r13) 3232 * ppr is saved in HSTATE_PPR(r13) 3233 */ 3234kvmppc_bad_host_intr: 3235 /* 3236 * Switch to the emergency stack, but start half-way down in 3237 * case we were already on it. 3238 */ 3239 mr r9, r1 3240 std r1, PACAR1(r13) 3241 ld r1, PACAEMERGSP(r13) 3242 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE 3243 std r9, 0(r1) 3244 std r0, GPR0(r1) 3245 std r9, GPR1(r1) 3246 std r2, GPR2(r1) 3247 SAVE_4GPRS(3, r1) 3248 SAVE_2GPRS(7, r1) 3249 srdi r0, r12, 32 3250 clrldi r12, r12, 32 3251 std r0, _CCR(r1) 3252 std r12, _TRAP(r1) 3253 andi. r0, r12, 2 3254 beq 1f 3255 mfspr r3, SPRN_HSRR0 3256 mfspr r4, SPRN_HSRR1 3257 mfspr r5, SPRN_HDAR 3258 mfspr r6, SPRN_HDSISR 3259 b 2f 32601: mfspr r3, SPRN_SRR0 3261 mfspr r4, SPRN_SRR1 3262 mfspr r5, SPRN_DAR 3263 mfspr r6, SPRN_DSISR 32642: std r3, _NIP(r1) 3265 std r4, _MSR(r1) 3266 std r5, _DAR(r1) 3267 std r6, _DSISR(r1) 3268 ld r9, HSTATE_SCRATCH2(r13) 3269 ld r12, HSTATE_SCRATCH0(r13) 3270 GET_SCRATCH0(r0) 3271 SAVE_4GPRS(9, r1) 3272 std r0, GPR13(r1) 3273 SAVE_NVGPRS(r1) 3274 ld r5, HSTATE_CFAR(r13) 3275 std r5, ORIG_GPR3(r1) 3276 mflr r3 3277#ifdef CONFIG_RELOCATABLE 3278 ld r4, HSTATE_SCRATCH1(r13) 3279#else 3280 mfctr r4 3281#endif 3282 mfxer r5 3283 lbz r6, PACAIRQSOFTMASK(r13) 3284 std r3, _LINK(r1) 3285 std r4, _CTR(r1) 3286 std r5, _XER(r1) 3287 std r6, SOFTE(r1) 3288 ld r2, PACATOC(r13) 3289 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) 3290 std r3, STACK_FRAME_OVERHEAD-16(r1) 3291 3292 /* 3293 * On POWER9 do a minimal restore of the MMU and call C code, 3294 * which will print a message and panic. 3295 * XXX On POWER7 and POWER8, we just spin here since we don't 3296 * know what the other threads are doing (and we don't want to 3297 * coordinate with them) - but at least we now have register state 3298 * in memory that we might be able to look at from another CPU. 3299 */ 3300BEGIN_FTR_SECTION 3301 b . 3302END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 3303 ld r9, HSTATE_KVM_VCPU(r13) 3304 ld r10, VCPU_KVM(r9) 3305 3306 li r0, 0 3307 mtspr SPRN_AMR, r0 3308 mtspr SPRN_IAMR, r0 3309 mtspr SPRN_CIABR, r0 3310 mtspr SPRN_DAWRX, r0 3311 3312BEGIN_MMU_FTR_SECTION 3313 b 4f 3314END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 3315 3316 slbmte r0, r0 3317 slbia 3318 ptesync 3319 ld r8, PACA_SLBSHADOWPTR(r13) 3320 .rept SLB_NUM_BOLTED 3321 li r3, SLBSHADOW_SAVEAREA 3322 LDX_BE r5, r8, r3 3323 addi r3, r3, 8 3324 LDX_BE r6, r8, r3 3325 andis. r7, r5, SLB_ESID_V@h 3326 beq 3f 3327 slbmte r6, r5 33283: addi r8, r8, 16 3329 .endr 3330 33314: lwz r7, KVM_HOST_LPID(r10) 3332 mtspr SPRN_LPID, r7 3333 mtspr SPRN_PID, r0 3334 ld r8, KVM_HOST_LPCR(r10) 3335 mtspr SPRN_LPCR, r8 3336 isync 3337 li r0, KVM_GUEST_MODE_NONE 3338 stb r0, HSTATE_IN_GUEST(r13) 3339 3340 /* 3341 * Turn on the MMU and jump to C code 3342 */ 3343 bcl 20, 31, .+4 33445: mflr r3 3345 addi r3, r3, 9f - 5b 3346 li r4, -1 3347 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ 3348 ld r4, PACAKMSR(r13) 3349 mtspr SPRN_SRR0, r3 3350 mtspr SPRN_SRR1, r4 3351 RFI_TO_KERNEL 33529: addi r3, r1, STACK_FRAME_OVERHEAD 3353 bl kvmppc_bad_interrupt 3354 b 9b 3355 3356/* 3357 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 3358 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 3359 * r11 has the guest MSR value (in/out) 3360 * r9 has a vcpu pointer (in) 3361 * r0 is used as a scratch register 3362 */ 3363kvmppc_msr_interrupt: 3364 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 3365 cmpwi r0, 2 /* Check if we are in transactional state.. */ 3366 ld r11, VCPU_INTR_MSR(r9) 3367 bne 1f 3368 /* ... if transactional, change to suspended */ 3369 li r0, 1 33701: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 3371 blr 3372 3373/* 3374 * Load up guest PMU state. R3 points to the vcpu struct. 3375 */ 3376_GLOBAL(kvmhv_load_guest_pmu) 3377EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) 3378 mr r4, r3 3379 mflr r0 3380 li r3, 1 3381 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3382 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3383 isync 3384BEGIN_FTR_SECTION 3385 ld r3, VCPU_MMCR(r4) 3386 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3387 cmpwi r5, MMCR0_PMAO 3388 beql kvmppc_fix_pmao 3389END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3390 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 3391 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 3392 lwz r6, VCPU_PMC + 8(r4) 3393 lwz r7, VCPU_PMC + 12(r4) 3394 lwz r8, VCPU_PMC + 16(r4) 3395 lwz r9, VCPU_PMC + 20(r4) 3396 mtspr SPRN_PMC1, r3 3397 mtspr SPRN_PMC2, r5 3398 mtspr SPRN_PMC3, r6 3399 mtspr SPRN_PMC4, r7 3400 mtspr SPRN_PMC5, r8 3401 mtspr SPRN_PMC6, r9 3402 ld r3, VCPU_MMCR(r4) 3403 ld r5, VCPU_MMCR + 8(r4) 3404 ld r6, VCPU_MMCR + 16(r4) 3405 ld r7, VCPU_SIAR(r4) 3406 ld r8, VCPU_SDAR(r4) 3407 mtspr SPRN_MMCR1, r5 3408 mtspr SPRN_MMCRA, r6 3409 mtspr SPRN_SIAR, r7 3410 mtspr SPRN_SDAR, r8 3411BEGIN_FTR_SECTION 3412 ld r5, VCPU_MMCR + 24(r4) 3413 ld r6, VCPU_SIER(r4) 3414 mtspr SPRN_MMCR2, r5 3415 mtspr SPRN_SIER, r6 3416BEGIN_FTR_SECTION_NESTED(96) 3417 lwz r7, VCPU_PMC + 24(r4) 3418 lwz r8, VCPU_PMC + 28(r4) 3419 ld r9, VCPU_MMCR + 32(r4) 3420 mtspr SPRN_SPMC1, r7 3421 mtspr SPRN_SPMC2, r8 3422 mtspr SPRN_MMCRS, r9 3423END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3424END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3425 mtspr SPRN_MMCR0, r3 3426 isync 3427 mtlr r0 3428 blr 3429 3430/* 3431 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. 3432 */ 3433_GLOBAL(kvmhv_load_host_pmu) 3434EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) 3435 mflr r0 3436 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ 3437 cmpwi r4, 0 3438 beq 23f /* skip if not */ 3439BEGIN_FTR_SECTION 3440 ld r3, HSTATE_MMCR0(r13) 3441 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3442 cmpwi r4, MMCR0_PMAO 3443 beql kvmppc_fix_pmao 3444END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3445 lwz r3, HSTATE_PMC1(r13) 3446 lwz r4, HSTATE_PMC2(r13) 3447 lwz r5, HSTATE_PMC3(r13) 3448 lwz r6, HSTATE_PMC4(r13) 3449 lwz r8, HSTATE_PMC5(r13) 3450 lwz r9, HSTATE_PMC6(r13) 3451 mtspr SPRN_PMC1, r3 3452 mtspr SPRN_PMC2, r4 3453 mtspr SPRN_PMC3, r5 3454 mtspr SPRN_PMC4, r6 3455 mtspr SPRN_PMC5, r8 3456 mtspr SPRN_PMC6, r9 3457 ld r3, HSTATE_MMCR0(r13) 3458 ld r4, HSTATE_MMCR1(r13) 3459 ld r5, HSTATE_MMCRA(r13) 3460 ld r6, HSTATE_SIAR(r13) 3461 ld r7, HSTATE_SDAR(r13) 3462 mtspr SPRN_MMCR1, r4 3463 mtspr SPRN_MMCRA, r5 3464 mtspr SPRN_SIAR, r6 3465 mtspr SPRN_SDAR, r7 3466BEGIN_FTR_SECTION 3467 ld r8, HSTATE_MMCR2(r13) 3468 ld r9, HSTATE_SIER(r13) 3469 mtspr SPRN_MMCR2, r8 3470 mtspr SPRN_SIER, r9 3471END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3472 mtspr SPRN_MMCR0, r3 3473 isync 3474 mtlr r0 347523: blr 3476 3477/* 3478 * Save guest PMU state into the vcpu struct. 3479 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) 3480 */ 3481_GLOBAL(kvmhv_save_guest_pmu) 3482EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) 3483 mr r9, r3 3484 mr r8, r4 3485BEGIN_FTR_SECTION 3486 /* 3487 * POWER8 seems to have a hardware bug where setting 3488 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] 3489 * when some counters are already negative doesn't seem 3490 * to cause a performance monitor alert (and hence interrupt). 3491 * The effect of this is that when saving the PMU state, 3492 * if there is no PMU alert pending when we read MMCR0 3493 * before freezing the counters, but one becomes pending 3494 * before we read the counters, we lose it. 3495 * To work around this, we need a way to freeze the counters 3496 * before reading MMCR0. Normally, freezing the counters 3497 * is done by writing MMCR0 (to set MMCR0[FC]) which 3498 * unavoidably writes MMCR0[PMA0] as well. On POWER8, 3499 * we can also freeze the counters using MMCR2, by writing 3500 * 1s to all the counter freeze condition bits (there are 3501 * 9 bits each for 6 counters). 3502 */ 3503 li r3, -1 /* set all freeze bits */ 3504 clrrdi r3, r3, 10 3505 mfspr r10, SPRN_MMCR2 3506 mtspr SPRN_MMCR2, r3 3507 isync 3508END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3509 li r3, 1 3510 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3511 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 3512 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3513 mfspr r6, SPRN_MMCRA 3514 /* Clear MMCRA in order to disable SDAR updates */ 3515 li r7, 0 3516 mtspr SPRN_MMCRA, r7 3517 isync 3518 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ 3519 bne 21f 3520 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 3521 b 22f 352221: mfspr r5, SPRN_MMCR1 3523 mfspr r7, SPRN_SIAR 3524 mfspr r8, SPRN_SDAR 3525 std r4, VCPU_MMCR(r9) 3526 std r5, VCPU_MMCR + 8(r9) 3527 std r6, VCPU_MMCR + 16(r9) 3528BEGIN_FTR_SECTION 3529 std r10, VCPU_MMCR + 24(r9) 3530END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3531 std r7, VCPU_SIAR(r9) 3532 std r8, VCPU_SDAR(r9) 3533 mfspr r3, SPRN_PMC1 3534 mfspr r4, SPRN_PMC2 3535 mfspr r5, SPRN_PMC3 3536 mfspr r6, SPRN_PMC4 3537 mfspr r7, SPRN_PMC5 3538 mfspr r8, SPRN_PMC6 3539 stw r3, VCPU_PMC(r9) 3540 stw r4, VCPU_PMC + 4(r9) 3541 stw r5, VCPU_PMC + 8(r9) 3542 stw r6, VCPU_PMC + 12(r9) 3543 stw r7, VCPU_PMC + 16(r9) 3544 stw r8, VCPU_PMC + 20(r9) 3545BEGIN_FTR_SECTION 3546 mfspr r5, SPRN_SIER 3547 std r5, VCPU_SIER(r9) 3548BEGIN_FTR_SECTION_NESTED(96) 3549 mfspr r6, SPRN_SPMC1 3550 mfspr r7, SPRN_SPMC2 3551 mfspr r8, SPRN_MMCRS 3552 stw r6, VCPU_PMC + 24(r9) 3553 stw r7, VCPU_PMC + 28(r9) 3554 std r8, VCPU_MMCR + 32(r9) 3555 lis r4, 0x8000 3556 mtspr SPRN_MMCRS, r4 3557END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3558END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 355922: blr 3560 3561/* 3562 * This works around a hardware bug on POWER8E processors, where 3563 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 3564 * performance monitor interrupt. Instead, when we need to have 3565 * an interrupt pending, we have to arrange for a counter to overflow. 3566 */ 3567kvmppc_fix_pmao: 3568 li r3, 0 3569 mtspr SPRN_MMCR2, r3 3570 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h 3571 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN 3572 mtspr SPRN_MMCR0, r3 3573 lis r3, 0x7fff 3574 ori r3, r3, 0xffff 3575 mtspr SPRN_PMC6, r3 3576 isync 3577 blr 3578 3579#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 3580/* 3581 * Start timing an activity 3582 * r3 = pointer to time accumulation struct, r4 = vcpu 3583 */ 3584kvmhv_start_timing: 3585 ld r5, HSTATE_KVM_VCORE(r13) 3586 ld r6, VCORE_TB_OFFSET_APPL(r5) 3587 mftb r5 3588 subf r5, r6, r5 /* subtract current timebase offset */ 3589 std r3, VCPU_CUR_ACTIVITY(r4) 3590 std r5, VCPU_ACTIVITY_START(r4) 3591 blr 3592 3593/* 3594 * Accumulate time to one activity and start another. 3595 * r3 = pointer to new time accumulation struct, r4 = vcpu 3596 */ 3597kvmhv_accumulate_time: 3598 ld r5, HSTATE_KVM_VCORE(r13) 3599 ld r8, VCORE_TB_OFFSET_APPL(r5) 3600 ld r5, VCPU_CUR_ACTIVITY(r4) 3601 ld r6, VCPU_ACTIVITY_START(r4) 3602 std r3, VCPU_CUR_ACTIVITY(r4) 3603 mftb r7 3604 subf r7, r8, r7 /* subtract current timebase offset */ 3605 std r7, VCPU_ACTIVITY_START(r4) 3606 cmpdi r5, 0 3607 beqlr 3608 subf r3, r6, r7 3609 ld r8, TAS_SEQCOUNT(r5) 3610 cmpdi r8, 0 3611 addi r8, r8, 1 3612 std r8, TAS_SEQCOUNT(r5) 3613 lwsync 3614 ld r7, TAS_TOTAL(r5) 3615 add r7, r7, r3 3616 std r7, TAS_TOTAL(r5) 3617 ld r6, TAS_MIN(r5) 3618 ld r7, TAS_MAX(r5) 3619 beq 3f 3620 cmpd r3, r6 3621 bge 1f 36223: std r3, TAS_MIN(r5) 36231: cmpd r3, r7 3624 ble 2f 3625 std r3, TAS_MAX(r5) 36262: lwsync 3627 addi r8, r8, 1 3628 std r8, TAS_SEQCOUNT(r5) 3629 blr 3630#endif 3631