/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			208
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu
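	/*
	 * Note on the DEC reload below: HSTATE_DECEXP holds the host
	 * timebase value at which the host decrementer is due to
	 * expire, so the new DEC value is just (dec_expires - now).
	 */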
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b
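	/*
	 * The lwarx/stwcx. loops above are the usual PPC atomic
	 * read-modify-write pattern: lwarx takes a reservation on the
	 * word, stwcx. stores only if the reservation is still held,
	 * and we retry when cr0.eq shows the store conditional failed.
	 */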
	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from Linux offline idle code.
 * Relocation is off
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld	r4,PACAEMERGSP(r13)
	mfcr	r5
	mflr	r0
	std	r1,0(r4)
	std	r5,8(r4)
	std	r0,16(r4)
	subi	r1,r4,STACK_FRAME_OVERHEAD
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
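	/*
	 * The rlwimi below inserts r3 (= LPCR_PECE0) under the
	 * PECE0|PECE1 mask, i.e. it sets PECE0 and clears PECE1, so
	 * that external interrupts, but not the decrementer, remain
	 * enabled as wake-up events for the offline thread.
	 */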
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr	r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi	r1, r1, STACK_FRAME_OVERHEAD
	ld	r0, 16(r1)
	ld	r5, 8(r1)
	ld	r1, 0(r1)
	mtlr	r0
	mtcr	r5
	blr

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, HMIs are ignored even though the subcores
	 * have already exited the guest. Hence the HMI keeps waking up
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync
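	/*
	 * The sequence above switches to the reserved LPID, does a
	 * ptesync, installs the new hashed-page-table base in SDR1 and
	 * only then writes the real LPID, so the MMU never sees a
	 * mismatched (SDR1, LPID) pair while the switch is in progress.
	 */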
	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
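	/*
	 * AMOR masks which AMR/UAMOR bits a (guest) kernel may modify;
	 * writing all 1s gives the guest full control of its own
	 * storage-protection keys.
	 */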
	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	lwz	r8, VCPU_XIVE_CAM_WORD(r4)
	li	r7, TM_QW1_OS + TM_WORD2
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdx	r11,r9,r10
	stwx	r8,r7,r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdcix	r11,r9,r10
	stwcix	r8,r7,r10
3:	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	cr1, r0,0
	beq	cr1, 1f
	li	r9, XIVE_ESB_SET_PQ_01
	beq	4f			/* in real mode? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r4)
	ldx	r0, r10, r9
	b	5f
4:	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	ldcix	r0, r10, r9
5:	sync
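	/*
	 * Note: a load from the ESB page at the XIVE_ESB_SET_PQ_01
	 * offset atomically sets the PQ bits to 01 (masked) and
	 * returns the previous PQ state, which the logic below
	 * depends on.
	 */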
	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

	li	r0, 0
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
BEGIN_FTR_SECTION
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
	or	r0, r0, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_entry_c:
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	ld	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.
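/*
 * The "b ." above is never executed; it only stops the CPU from
 * speculating past the hrfid into whatever code follows.
 */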
/*
 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 * r3 = vcpu pointer
 * Most SPRs and all the VSRs have been loaded already.
 */
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	li	r0, 1
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

	std	r3, HSTATE_KVM_VCPU(r13)
	mfcr	r4
	stw	r4, SFS+8(r1)

	std	r1, HSTATE_HOST_R1(r13)

	reg = 14
	.rept	18
	std	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, __VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr

	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)

	mr	r4, r3
	b	fast_guest_entry_c
guest_exit_short_path:

	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	reg = 14
	.rept	18
	std	reg, __VCPU_GPR(reg)(r9)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	lwz	r4, SFS+8(r1)
	mtcr	r4

	mr	r3, r12		/* trap number */

	addi	r1, r1, SFS
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

	/* If we are in real mode, do a rfid to get back to the caller */
	mfmsr	r4
	andi.	r5, r4, MSR_IR
	bnelr
	rldicl	r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	mtspr	SPRN_SRR0, r0
	ld	r10, HSTATE_HOST_MSR(r13)
	rldimi	r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr	SPRN_SRR1, r10
	RFI_TO_KERNEL
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar	r3
	mfdsisr	r4
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
	/* always exit if we're running a nested guest */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	maybe_reenter_guest
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	beq	hmi_realmode

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
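	/*
	 * Note on the XIVE "pull" below: when the MMU is off we have
	 * to go through the physical TIMA address with cache-inhibited
	 * accesses (the *cix forms), hence the MSR_DR test.
	 */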
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/* If we came in through the P9 short path, go back out to C now */
	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
	cmpwi	r0, 0
	bne	guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	li	r5, 0
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	mr	r3, r12
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)
	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
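	/*
	 * The rldicl/rotldi pair in the POWER9 leg above implements
	 * the "r6 &= PSSCR_GUEST_VIS" masking without having to load
	 * a 64-bit mask constant into a spare register.
	 */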
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_IAMR, r8

8:	/* Power7 jumps back in here */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_AMR, r5
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r9
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
91:
#endif
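	/*
	 * The VPA yield count below is bumped on every guest entry and
	 * every exit, so its parity flips each time the vcpu changes
	 * between running and preempted; the dirty flag makes sure the
	 * updated count gets written back to the guest's VPA.
	 */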
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	mr	r3, r9
	li	r4, 1
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 2f

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */
	eieio
	tlbsync
	ptesync

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync

2:
#endif /* CONFIG_PPC_RADIX_MMU */

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
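	/*
	 * VCORE_ENTRY_EXIT packs the map of threads that entered the
	 * guest in its low byte and the map of threads that have
	 * exited in the byte above; the loop above spins until the
	 * two maps are equal, i.e. every thread that went in has
	 * come back out.
	 */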
	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi	r8,0
	beq	17f
	li	r0, 0
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:
	/*
	 * If this is an HMI, we called kvmppc_realmode_hmi_handler
	 * above, which may or may not have already called
	 * kvmppc_subcore_exit_guest. Fortunately, all that
	 * kvmppc_subcore_exit_guest does is clear a flag, so calling
	 * it again here is benign even if kvmppc_realmode_hmi_handler
	 * has already called it.
	 */
	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:
BEGIN_FTR_SECTION
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	47f
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r8, 0
	beq	47f
	bl	kvmhv_p9_restore_lpcr
	nop
	b	48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
48:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

kvmppc_guest_external:
	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, we check the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
maybe_reenter_guest:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt
	b	guest_exit_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2.  This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r3, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
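	/*
	 * The guest entry path (just before HRFI_TO_GUEST) wrote the
	 * 0x7fff canary into HDSISR; if it is still there, the
	 * hardware did not update HDSISR for this interrupt, so the
	 * safe response is simply to retry the instruction.
	 */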
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
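	/*
	 * kvmppc_hpte_hv_fault (called below) returns 0 to retry the
	 * instruction, -1 to handle the fault in kernel mode, and
	 * otherwise the SRR1/DSISR bits to reflect to the guest; the
	 * data-fault path above additionally uses -2 to request MMIO
	 * emulation.
	 */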
 */
	mr	r3, r9		/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0		/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0		/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1		/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	/* sc 1 from nested guest - give it to L1 to handle */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.
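(This is the H_TOO_HARD case: the real-mode handler could not finish the job, so we exit with trap BOOK3S_INTERRUPT_SYSCALL and let the virtual-mode hcall code, or ultimately userspace, complete it.)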
We need to restore some clobbered volatiles 2250 * before resuming the pass-it-to-qemu path */ 2251hcall_real_fallback: 2252 li r12,BOOK3S_INTERRUPT_SYSCALL 2253 ld r9, HSTATE_KVM_VCPU(r13) 2254 2255 b guest_exit_cont 2256 2257 .globl hcall_real_table 2258hcall_real_table: 2259 .long 0 /* 0 - unused */ 2260 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 2261 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 2262 .long DOTSYM(kvmppc_h_read) - hcall_real_table 2263 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 2264 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 2265 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 2266#ifdef CONFIG_SPAPR_TCE_IOMMU 2267 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 2268 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table 2269#else 2270 .long 0 /* 0x1c */ 2271 .long 0 /* 0x20 */ 2272#endif 2273 .long 0 /* 0x24 - H_SET_SPRG0 */ 2274 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 2275 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 2276 .long 0 /* 0x30 */ 2277 .long 0 /* 0x34 */ 2278 .long 0 /* 0x38 */ 2279 .long 0 /* 0x3c */ 2280 .long 0 /* 0x40 */ 2281 .long 0 /* 0x44 */ 2282 .long 0 /* 0x48 */ 2283 .long 0 /* 0x4c */ 2284 .long 0 /* 0x50 */ 2285 .long 0 /* 0x54 */ 2286 .long 0 /* 0x58 */ 2287 .long 0 /* 0x5c */ 2288 .long 0 /* 0x60 */ 2289#ifdef CONFIG_KVM_XICS 2290 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 2291 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 2292 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 2293 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table 2294 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 2295#else 2296 .long 0 /* 0x64 - H_EOI */ 2297 .long 0 /* 0x68 - H_CPPR */ 2298 .long 0 /* 0x6c - H_IPI */ 2299 .long 0 /* 0x70 - H_IPOLL */ 2300 .long 0 /* 0x74 - H_XIRR */ 2301#endif 2302 .long 0 /* 0x78 */ 2303 .long 0 /* 0x7c */ 2304 .long 0 /* 0x80 */ 2305 .long 0 /* 0x84 */ 2306 .long 0 /* 0x88 */ 2307 .long 0 /* 0x8c */ 2308 .long 0 /* 0x90 */ 2309 .long 0 /* 0x94 */ 2310 .long 0 /* 0x98 */ 2311 .long 0 /* 0x9c */ 2312 .long 0 /* 0xa0 */ 2313 .long 0 /* 0xa4 */ 2314 .long 0 /* 0xa8 */ 2315 .long 0 /* 0xac */ 2316 .long 0 /* 0xb0 */ 2317 .long 0 /* 0xb4 */ 2318 .long 0 /* 0xb8 */ 2319 .long 0 /* 0xbc */ 2320 .long 0 /* 0xc0 */ 2321 .long 0 /* 0xc4 */ 2322 .long 0 /* 0xc8 */ 2323 .long 0 /* 0xcc */ 2324 .long 0 /* 0xd0 */ 2325 .long 0 /* 0xd4 */ 2326 .long 0 /* 0xd8 */ 2327 .long 0 /* 0xdc */ 2328 .long DOTSYM(kvmppc_h_cede) - hcall_real_table 2329 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 2330 .long 0 /* 0xe8 */ 2331 .long 0 /* 0xec */ 2332 .long 0 /* 0xf0 */ 2333 .long 0 /* 0xf4 */ 2334 .long 0 /* 0xf8 */ 2335 .long 0 /* 0xfc */ 2336 .long 0 /* 0x100 */ 2337 .long 0 /* 0x104 */ 2338 .long 0 /* 0x108 */ 2339 .long 0 /* 0x10c */ 2340 .long 0 /* 0x110 */ 2341 .long 0 /* 0x114 */ 2342 .long 0 /* 0x118 */ 2343 .long 0 /* 0x11c */ 2344 .long 0 /* 0x120 */ 2345 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 2346 .long 0 /* 0x128 */ 2347 .long 0 /* 0x12c */ 2348 .long 0 /* 0x130 */ 2349 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2350#ifdef CONFIG_SPAPR_TCE_IOMMU 2351 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table 2352 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table 2353#else 2354 .long 0 /* 0x138 */ 2355 .long 0 /* 0x13c */ 2356#endif 2357 .long 0 /* 0x140 */ 2358 .long 0 /* 0x144 */ 2359 .long 0 /* 0x148 */ 2360 .long 0 /* 0x14c */ 2361 .long 0 /* 0x150 */ 2362 .long 0 /* 0x154 */ 2363 .long 0 /* 0x158 */ 2364 .long 0 /* 0x15c */ 2365 .long 0 /* 0x160 */ 2366 
.long 0 /* 0x164 */ 2367 .long 0 /* 0x168 */ 2368 .long 0 /* 0x16c */ 2369 .long 0 /* 0x170 */ 2370 .long 0 /* 0x174 */ 2371 .long 0 /* 0x178 */ 2372 .long 0 /* 0x17c */ 2373 .long 0 /* 0x180 */ 2374 .long 0 /* 0x184 */ 2375 .long 0 /* 0x188 */ 2376 .long 0 /* 0x18c */ 2377 .long 0 /* 0x190 */ 2378 .long 0 /* 0x194 */ 2379 .long 0 /* 0x198 */ 2380 .long 0 /* 0x19c */ 2381 .long 0 /* 0x1a0 */ 2382 .long 0 /* 0x1a4 */ 2383 .long 0 /* 0x1a8 */ 2384 .long 0 /* 0x1ac */ 2385 .long 0 /* 0x1b0 */ 2386 .long 0 /* 0x1b4 */ 2387 .long 0 /* 0x1b8 */ 2388 .long 0 /* 0x1bc */ 2389 .long 0 /* 0x1c0 */ 2390 .long 0 /* 0x1c4 */ 2391 .long 0 /* 0x1c8 */ 2392 .long 0 /* 0x1cc */ 2393 .long 0 /* 0x1d0 */ 2394 .long 0 /* 0x1d4 */ 2395 .long 0 /* 0x1d8 */ 2396 .long 0 /* 0x1dc */ 2397 .long 0 /* 0x1e0 */ 2398 .long 0 /* 0x1e4 */ 2399 .long 0 /* 0x1e8 */ 2400 .long 0 /* 0x1ec */ 2401 .long 0 /* 0x1f0 */ 2402 .long 0 /* 0x1f4 */ 2403 .long 0 /* 0x1f8 */ 2404 .long 0 /* 0x1fc */ 2405 .long 0 /* 0x200 */ 2406 .long 0 /* 0x204 */ 2407 .long 0 /* 0x208 */ 2408 .long 0 /* 0x20c */ 2409 .long 0 /* 0x210 */ 2410 .long 0 /* 0x214 */ 2411 .long 0 /* 0x218 */ 2412 .long 0 /* 0x21c */ 2413 .long 0 /* 0x220 */ 2414 .long 0 /* 0x224 */ 2415 .long 0 /* 0x228 */ 2416 .long 0 /* 0x22c */ 2417 .long 0 /* 0x230 */ 2418 .long 0 /* 0x234 */ 2419 .long 0 /* 0x238 */ 2420 .long 0 /* 0x23c */ 2421 .long 0 /* 0x240 */ 2422 .long 0 /* 0x244 */ 2423 .long 0 /* 0x248 */ 2424 .long 0 /* 0x24c */ 2425 .long 0 /* 0x250 */ 2426 .long 0 /* 0x254 */ 2427 .long 0 /* 0x258 */ 2428 .long 0 /* 0x25c */ 2429 .long 0 /* 0x260 */ 2430 .long 0 /* 0x264 */ 2431 .long 0 /* 0x268 */ 2432 .long 0 /* 0x26c */ 2433 .long 0 /* 0x270 */ 2434 .long 0 /* 0x274 */ 2435 .long 0 /* 0x278 */ 2436 .long 0 /* 0x27c */ 2437 .long 0 /* 0x280 */ 2438 .long 0 /* 0x284 */ 2439 .long 0 /* 0x288 */ 2440 .long 0 /* 0x28c */ 2441 .long 0 /* 0x290 */ 2442 .long 0 /* 0x294 */ 2443 .long 0 /* 0x298 */ 2444 .long 0 /* 0x29c */ 2445 .long 0 /* 0x2a0 */ 2446 .long 0 /* 0x2a4 */ 2447 .long 0 /* 0x2a8 */ 2448 .long 0 /* 0x2ac */ 2449 .long 0 /* 0x2b0 */ 2450 .long 0 /* 0x2b4 */ 2451 .long 0 /* 0x2b8 */ 2452 .long 0 /* 0x2bc */ 2453 .long 0 /* 0x2c0 */ 2454 .long 0 /* 0x2c4 */ 2455 .long 0 /* 0x2c8 */ 2456 .long 0 /* 0x2cc */ 2457 .long 0 /* 0x2d0 */ 2458 .long 0 /* 0x2d4 */ 2459 .long 0 /* 0x2d8 */ 2460 .long 0 /* 0x2dc */ 2461 .long 0 /* 0x2e0 */ 2462 .long 0 /* 0x2e4 */ 2463 .long 0 /* 0x2e8 */ 2464 .long 0 /* 0x2ec */ 2465 .long 0 /* 0x2f0 */ 2466 .long 0 /* 0x2f4 */ 2467 .long 0 /* 0x2f8 */ 2468#ifdef CONFIG_KVM_XICS 2469 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table 2470#else 2471 .long 0 /* 0x2fc - H_XIRR_X*/ 2472#endif 2473 .long DOTSYM(kvmppc_h_random) - hcall_real_table 2474 .globl hcall_real_table_end 2475hcall_real_table_end: 2476 2477_GLOBAL(kvmppc_h_set_xdabr) 2478EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 2479 andi. r0, r5, DABRX_USER | DABRX_KERNEL 2480 beq 6f 2481 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 2482 andc. 
r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
	LOAD_REG_ADDR(r11, dawr_force_enable)
	lbz	r11, 0(r11)
	cmpdi	r11, 0
	bne	3f
	li	r3, H_HARDWARE	/* only clobber r3 when returning immediately; */
	blr			/* it must still hold the vcpu pointer below */
3:
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
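 * The feature fixup keeps this branch only when
 * (cpu_features & (CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST)) == 0,
 * which is what END_FTR_SECTION(mask, 0) expresses; with either
 * feature present the branch is patched to a nop and the save
 * routine is called.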
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
FTR_SECTION_ELSE
	li	r3, PNV_THREAD_NAP
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync

BEGIN_FTR_SECTION
	bl	isa300_idle_stop_mayloss
FTR_SECTION_ELSE
	bl	isa206_idle_insn_mayloss
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	mtspr	SPRN_SRR1, r3

	li	r0, 0
	stb	r0, PACA_FTRACE_ENABLED(r13)

	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	lbz	r0, HSTATE_NAPPING(r13)
	cmpwi	r0, NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0, NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
	cmpwi	r0, NAPPING_UNSPLIT
	beq	kvm_unsplit_wakeup
	twi	31,0,0		/* Nap state must not be zero */

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

#ifdef
CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2728 addi r3, r4, VCPU_TB_RMINTR 2729 bl kvmhv_accumulate_time 2730#endif 2731 2732#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2733/* 2734 * Branch around the call if both CPU_FTR_TM and 2735 * CPU_FTR_P9_TM_HV_ASSIST are off. 2736 */ 2737BEGIN_FTR_SECTION 2738 b 91f 2739END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 2740 /* 2741 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2742 */ 2743 mr r3, r4 2744 ld r4, VCPU_MSR(r3) 2745 li r5, 0 /* don't preserve non-vol regs */ 2746 bl kvmppc_restore_tm_hv 2747 nop 2748 ld r4, HSTATE_KVM_VCPU(r13) 274991: 2750#endif 2751 2752 /* load up FP state */ 2753 bl kvmppc_load_fp 2754 2755 /* Restore guest decrementer */ 2756 ld r3, VCPU_DEC_EXPIRES(r4) 2757 ld r5, HSTATE_KVM_VCORE(r13) 2758 ld r6, VCORE_TB_OFFSET_APPL(r5) 2759 add r3, r3, r6 /* convert host TB to guest TB value */ 2760 mftb r7 2761 subf r3, r7, r3 2762 mtspr SPRN_DEC, r3 2763 2764 /* Load NV GPRS */ 2765 ld r14, VCPU_GPR(R14)(r4) 2766 ld r15, VCPU_GPR(R15)(r4) 2767 ld r16, VCPU_GPR(R16)(r4) 2768 ld r17, VCPU_GPR(R17)(r4) 2769 ld r18, VCPU_GPR(R18)(r4) 2770 ld r19, VCPU_GPR(R19)(r4) 2771 ld r20, VCPU_GPR(R20)(r4) 2772 ld r21, VCPU_GPR(R21)(r4) 2773 ld r22, VCPU_GPR(R22)(r4) 2774 ld r23, VCPU_GPR(R23)(r4) 2775 ld r24, VCPU_GPR(R24)(r4) 2776 ld r25, VCPU_GPR(R25)(r4) 2777 ld r26, VCPU_GPR(R26)(r4) 2778 ld r27, VCPU_GPR(R27)(r4) 2779 ld r28, VCPU_GPR(R28)(r4) 2780 ld r29, VCPU_GPR(R29)(r4) 2781 ld r30, VCPU_GPR(R30)(r4) 2782 ld r31, VCPU_GPR(R31)(r4) 2783 2784 /* Check the wake reason in SRR1 to see why we got here */ 2785 bl kvmppc_check_wake_reason 2786 2787 /* 2788 * Restore volatile registers since we could have called a 2789 * C routine in kvmppc_check_wake_reason 2790 * r4 = VCPU 2791 * r3 tells us whether we need to return to host or not 2792 * WARNING: it gets checked further down: 2793 * should not modify r3 until this check is done. 2794 */ 2795 ld r4, HSTATE_KVM_VCPU(r13) 2796 2797 /* clear our bit in vcore->napping_threads */ 279834: ld r5,HSTATE_KVM_VCORE(r13) 2799 lbz r7,HSTATE_PTID(r13) 2800 li r0,1 2801 sld r0,r0,r7 2802 addi r6,r5,VCORE_NAPPING_THREADS 280332: lwarx r7,0,r6 2804 andc r7,r7,r0 2805 stwcx. r7,0,r6 2806 bne 32b 2807 li r0,0 2808 stb r0,HSTATE_NAPPING(r13) 2809 2810 /* See if the wake reason saved in r3 means we need to exit */ 2811 stw r12, VCPU_TRAP(r4) 2812 mr r9, r4 2813 cmpdi r3, 0 2814 bgt guest_exit_cont 2815 b maybe_reenter_guest 2816 2817 /* cede when already previously prodded case */ 2818kvm_cede_prodded: 2819 li r0,0 2820 stb r0,VCPU_PRODDED(r3) 2821 sync /* order testing prodded vs. clearing ceded */ 2822 stb r0,VCPU_CEDED(r3) 2823 li r3,H_SUCCESS 2824 blr 2825 2826 /* we've ceded but we want to give control to the host */ 2827kvm_cede_exit: 2828 ld r9, HSTATE_KVM_VCPU(r13) 2829#ifdef CONFIG_KVM_XICS 2830 /* Abort if we still have a pending escalation */ 2831 lbz r5, VCPU_XIVE_ESC_ON(r9) 2832 cmpwi r5, 0 2833 beq 1f 2834 li r0, 0 2835 stb r0, VCPU_CEDED(r9) 28361: /* Enable XIVE escalation */ 2837 li r5, XIVE_ESB_SET_PQ_00 2838 mfmsr r0 2839 andi. r0, r0, MSR_DR /* in real mode? 
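A load from the escalation ESB page at offset XIVE_ESB_SET_PQ_00 atomically sets the PQ bits to 00, re-arming the escalation interrupt. With data relocation on we can use the cached virtual address of the page (ldx below); in real mode we must use its real address with a cache-inhibited load (ldcix).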
*/ 2840 beq 1f 2841 ld r10, VCPU_XIVE_ESC_VADDR(r9) 2842 cmpdi r10, 0 2843 beq 3f 2844 ldx r0, r10, r5 2845 b 2f 28461: ld r10, VCPU_XIVE_ESC_RADDR(r9) 2847 cmpdi r10, 0 2848 beq 3f 2849 ldcix r0, r10, r5 28502: sync 2851 li r0, 1 2852 stb r0, VCPU_XIVE_ESC_ON(r9) 2853#endif /* CONFIG_KVM_XICS */ 28543: b guest_exit_cont 2855 2856 /* Try to do machine check recovery in real mode */ 2857machine_check_realmode: 2858 mr r3, r9 /* get vcpu pointer */ 2859 bl kvmppc_realmode_machine_check 2860 nop 2861 /* all machine checks go to virtual mode for further handling */ 2862 ld r9, HSTATE_KVM_VCPU(r13) 2863 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2864 b guest_exit_cont 2865 2866/* 2867 * Call C code to handle a HMI in real mode. 2868 * Only the primary thread does the call, secondary threads are handled 2869 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 2870 * r9 points to the vcpu on entry 2871 */ 2872hmi_realmode: 2873 lbz r0, HSTATE_PTID(r13) 2874 cmpwi r0, 0 2875 bne guest_exit_cont 2876 bl kvmppc_realmode_hmi_handler 2877 ld r9, HSTATE_KVM_VCPU(r13) 2878 li r12, BOOK3S_INTERRUPT_HMI 2879 b guest_exit_cont 2880 2881/* 2882 * Check the reason we woke from nap, and take appropriate action. 2883 * Returns (in r3): 2884 * 0 if nothing needs to be done 2885 * 1 if something happened that needs to be handled by the host 2886 * -1 if there was a guest wakeup (IPI or msgsnd) 2887 * -2 if we handled a PCI passthrough interrupt (returned by 2888 * kvmppc_read_intr only) 2889 * 2890 * Also sets r12 to the interrupt vector for any interrupt that needs 2891 * to be handled now by the host (0x500 for external interrupt), or zero. 2892 * Modifies all volatile registers (since it may call a C function). 2893 * This routine calls kvmppc_read_intr, a C function, if an external 2894 * interrupt is pending. 2895 */ 2896kvmppc_check_wake_reason: 2897 mfspr r6, SPRN_SRR1 2898BEGIN_FTR_SECTION 2899 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2900FTR_SECTION_ELSE 2901 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2902ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2903 cmpwi r6, 8 /* was it an external interrupt? */ 2904 beq 7f /* if so, see what it was */ 2905 li r3, 0 2906 li r12, 0 2907 cmpwi r6, 6 /* was it the decrementer? */ 2908 beq 0f 2909BEGIN_FTR_SECTION 2910 cmpwi r6, 5 /* privileged doorbell? */ 2911 beq 0f 2912 cmpwi r6, 3 /* hypervisor doorbell? */ 2913 beq 3f 2914END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2915 cmpwi r6, 0xa /* Hypervisor maintenance ? */ 2916 beq 4f 2917 li r3, 1 /* anything else, return 1 */ 29180: blr 2919 2920 /* hypervisor doorbell */ 29213: li r12, BOOK3S_INTERRUPT_H_DOORBELL 2922 2923 /* 2924 * Clear the doorbell as we will invoke the handler 2925 * explicitly in the guest exit path. 
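 * msgclr clears a pending doorbell exception whose type matches the
 * value in the register (PPC_DBELL_SERVER shifted into the message
 * type field), so the doorbell is not taken a second time once
 * interrupts are re-enabled.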
2926 */ 2927 lis r6, (PPC_DBELL_SERVER << (63-36))@h 2928 PPC_MSGCLR(6) 2929 /* see if it's a host IPI */ 2930 li r3, 1 2931BEGIN_FTR_SECTION 2932 PPC_MSGSYNC 2933 lwsync 2934END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2935 lbz r0, HSTATE_HOST_IPI(r13) 2936 cmpwi r0, 0 2937 bnelr 2938 /* if not, return -1 */ 2939 li r3, -1 2940 blr 2941 2942 /* Woken up due to Hypervisor maintenance interrupt */ 29434: li r12, BOOK3S_INTERRUPT_HMI 2944 li r3, 1 2945 blr 2946 2947 /* external interrupt - create a stack frame so we can call C */ 29487: mflr r0 2949 std r0, PPC_LR_STKOFF(r1) 2950 stdu r1, -PPC_MIN_STKFRM(r1) 2951 bl kvmppc_read_intr 2952 nop 2953 li r12, BOOK3S_INTERRUPT_EXTERNAL 2954 cmpdi r3, 1 2955 ble 1f 2956 2957 /* 2958 * Return code of 2 means PCI passthrough interrupt, but 2959 * we need to return back to host to complete handling the 2960 * interrupt. Trap reason is expected in r12 by guest 2961 * exit code. 2962 */ 2963 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 29641: 2965 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 2966 addi r1, r1, PPC_MIN_STKFRM 2967 mtlr r0 2968 blr 2969 2970/* 2971 * Save away FP, VMX and VSX registers. 2972 * r3 = vcpu pointer 2973 * N.B. r30 and r31 are volatile across this function, 2974 * thus it is not callable from C. 2975 */ 2976kvmppc_save_fp: 2977 mflr r30 2978 mr r31,r3 2979 mfmsr r5 2980 ori r8,r5,MSR_FP 2981#ifdef CONFIG_ALTIVEC 2982BEGIN_FTR_SECTION 2983 oris r8,r8,MSR_VEC@h 2984END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2985#endif 2986#ifdef CONFIG_VSX 2987BEGIN_FTR_SECTION 2988 oris r8,r8,MSR_VSX@h 2989END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2990#endif 2991 mtmsrd r8 2992 addi r3,r3,VCPU_FPRS 2993 bl store_fp_state 2994#ifdef CONFIG_ALTIVEC 2995BEGIN_FTR_SECTION 2996 addi r3,r31,VCPU_VRS 2997 bl store_vr_state 2998END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2999#endif 3000 mfspr r6,SPRN_VRSAVE 3001 stw r6,VCPU_VRSAVE(r31) 3002 mtlr r30 3003 blr 3004 3005/* 3006 * Load up FP, VMX and VSX registers 3007 * r4 = vcpu pointer 3008 * N.B. r30 and r31 are volatile across this function, 3009 * thus it is not callable from C. 3010 */ 3011kvmppc_load_fp: 3012 mflr r30 3013 mr r31,r4 3014 mfmsr r9 3015 ori r8,r9,MSR_FP 3016#ifdef CONFIG_ALTIVEC 3017BEGIN_FTR_SECTION 3018 oris r8,r8,MSR_VEC@h 3019END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3020#endif 3021#ifdef CONFIG_VSX 3022BEGIN_FTR_SECTION 3023 oris r8,r8,MSR_VSX@h 3024END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3025#endif 3026 mtmsrd r8 3027 addi r3,r4,VCPU_FPRS 3028 bl load_fp_state 3029#ifdef CONFIG_ALTIVEC 3030BEGIN_FTR_SECTION 3031 addi r3,r31,VCPU_VRS 3032 bl load_vr_state 3033END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3034#endif 3035 lwz r7,VCPU_VRSAVE(r31) 3036 mtspr SPRN_VRSAVE,r7 3037 mtlr r30 3038 mr r4,r31 3039 blr 3040 3041#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 3042/* 3043 * Save transactional state and TM-related registers. 3044 * Called with r3 pointing to the vcpu struct and r4 containing 3045 * the guest MSR value. 3046 * r5 is non-zero iff non-volatile register state needs to be maintained. 3047 * If r5 == 0, this can modify all checkpointed registers, but 3048 * restores r1 and r2 before exit. 3049 */ 3050_GLOBAL_TOC(kvmppc_save_tm_hv) 3051EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) 3052 /* See if we need to handle fake suspend mode */ 3053BEGIN_FTR_SECTION 3054 b __kvmppc_save_tm 3055END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3056 3057 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? 
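Fake suspend is the POWER9 TM-assist state: the guest believes it is transactionally suspended while the hardware is not, and TM instructions trap to the hypervisor as softpatch interrupts for emulation (see kvmppc_tm_emul earlier in this file).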
*/ 3058 cmpwi r0, 0 3059 beq __kvmppc_save_tm 3060 3061 /* The following code handles the fake_suspend = 1 case */ 3062 mflr r0 3063 std r0, PPC_LR_STKOFF(r1) 3064 stdu r1, -PPC_MIN_STKFRM(r1) 3065 3066 /* Turn on TM. */ 3067 mfmsr r8 3068 li r0, 1 3069 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 3070 mtmsrd r8 3071 3072 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ 3073 beq 4f 3074BEGIN_FTR_SECTION 3075 bl pnv_power9_force_smt4_catch 3076END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3077 nop 3078 3079 /* We have to treclaim here because that's the only way to do S->N */ 3080 li r3, TM_CAUSE_KVM_RESCHED 3081 TRECLAIM(R3) 3082 3083 /* 3084 * We were in fake suspend, so we are not going to save the 3085 * register state as the guest checkpointed state (since 3086 * we already have it), therefore we can now use any volatile GPR. 3087 * In fact treclaim in fake suspend state doesn't modify 3088 * any registers. 3089 */ 3090 3091BEGIN_FTR_SECTION 3092 bl pnv_power9_force_smt4_release 3093END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3094 nop 3095 30964: 3097 mfspr r3, SPRN_PSSCR 3098 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ 3099 li r0, PSSCR_FAKE_SUSPEND 3100 andc r3, r3, r0 3101 mtspr SPRN_PSSCR, r3 3102 3103 /* Don't save TEXASR, use value from last exit in real suspend state */ 3104 ld r9, HSTATE_KVM_VCPU(r13) 3105 mfspr r5, SPRN_TFHAR 3106 mfspr r6, SPRN_TFIAR 3107 std r5, VCPU_TFHAR(r9) 3108 std r6, VCPU_TFIAR(r9) 3109 3110 addi r1, r1, PPC_MIN_STKFRM 3111 ld r0, PPC_LR_STKOFF(r1) 3112 mtlr r0 3113 blr 3114 3115/* 3116 * Restore transactional state and TM-related registers. 3117 * Called with r3 pointing to the vcpu struct 3118 * and r4 containing the guest MSR value. 3119 * r5 is non-zero iff non-volatile register state needs to be maintained. 3120 * This potentially modifies all checkpointed registers. 3121 * It restores r1 and r2 from the PACA. 3122 */ 3123_GLOBAL_TOC(kvmppc_restore_tm_hv) 3124EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) 3125 /* 3126 * If we are doing TM emulation for the guest on a POWER9 DD2, 3127 * then we don't actually do a trechkpt -- we either set up 3128 * fake-suspend mode, or emulate a TM rollback. 3129 */ 3130BEGIN_FTR_SECTION 3131 b __kvmppc_restore_tm 3132END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3133 mflr r0 3134 std r0, PPC_LR_STKOFF(r1) 3135 3136 li r0, 0 3137 stb r0, HSTATE_FAKE_SUSPEND(r13) 3138 3139 /* Turn on TM so we can restore TM SPRs */ 3140 mfmsr r5 3141 li r0, 1 3142 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG 3143 mtmsrd r5 3144 3145 /* 3146 * The user may change these outside of a transaction, so they must 3147 * always be context switched. 3148 */ 3149 ld r5, VCPU_TFHAR(r3) 3150 ld r6, VCPU_TFIAR(r3) 3151 ld r7, VCPU_TEXASR(r3) 3152 mtspr SPRN_TFHAR, r5 3153 mtspr SPRN_TFIAR, r6 3154 mtspr SPRN_TEXASR, r7 3155 3156 rldicl. 
r5, r4, 64 - MSR_TS_S_LG, 62 3157 beqlr /* TM not active in guest */ 3158 3159 /* Make sure the failure summary is set */ 3160 oris r7, r7, (TEXASR_FS)@h 3161 mtspr SPRN_TEXASR, r7 3162 3163 cmpwi r5, 1 /* check for suspended state */ 3164 bgt 10f 3165 stb r5, HSTATE_FAKE_SUSPEND(r13) 3166 b 9f /* and return */ 316710: stdu r1, -PPC_MIN_STKFRM(r1) 3168 /* guest is in transactional state, so simulate rollback */ 3169 bl kvmhv_emulate_tm_rollback 3170 nop 3171 addi r1, r1, PPC_MIN_STKFRM 31729: ld r0, PPC_LR_STKOFF(r1) 3173 mtlr r0 3174 blr 3175#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 3176 3177/* 3178 * We come here if we get any exception or interrupt while we are 3179 * executing host real mode code while in guest MMU context. 3180 * r12 is (CR << 32) | vector 3181 * r13 points to our PACA 3182 * r12 is saved in HSTATE_SCRATCH0(r13) 3183 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE 3184 * r9 is saved in HSTATE_SCRATCH2(r13) 3185 * r13 is saved in HSPRG1 3186 * cfar is saved in HSTATE_CFAR(r13) 3187 * ppr is saved in HSTATE_PPR(r13) 3188 */ 3189kvmppc_bad_host_intr: 3190 /* 3191 * Switch to the emergency stack, but start half-way down in 3192 * case we were already on it. 3193 */ 3194 mr r9, r1 3195 std r1, PACAR1(r13) 3196 ld r1, PACAEMERGSP(r13) 3197 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE 3198 std r9, 0(r1) 3199 std r0, GPR0(r1) 3200 std r9, GPR1(r1) 3201 std r2, GPR2(r1) 3202 SAVE_4GPRS(3, r1) 3203 SAVE_2GPRS(7, r1) 3204 srdi r0, r12, 32 3205 clrldi r12, r12, 32 3206 std r0, _CCR(r1) 3207 std r12, _TRAP(r1) 3208 andi. r0, r12, 2 3209 beq 1f 3210 mfspr r3, SPRN_HSRR0 3211 mfspr r4, SPRN_HSRR1 3212 mfspr r5, SPRN_HDAR 3213 mfspr r6, SPRN_HDSISR 3214 b 2f 32151: mfspr r3, SPRN_SRR0 3216 mfspr r4, SPRN_SRR1 3217 mfspr r5, SPRN_DAR 3218 mfspr r6, SPRN_DSISR 32192: std r3, _NIP(r1) 3220 std r4, _MSR(r1) 3221 std r5, _DAR(r1) 3222 std r6, _DSISR(r1) 3223 ld r9, HSTATE_SCRATCH2(r13) 3224 ld r12, HSTATE_SCRATCH0(r13) 3225 GET_SCRATCH0(r0) 3226 SAVE_4GPRS(9, r1) 3227 std r0, GPR13(r1) 3228 SAVE_NVGPRS(r1) 3229 ld r5, HSTATE_CFAR(r13) 3230 std r5, ORIG_GPR3(r1) 3231 mflr r3 3232#ifdef CONFIG_RELOCATABLE 3233 ld r4, HSTATE_SCRATCH1(r13) 3234#else 3235 mfctr r4 3236#endif 3237 mfxer r5 3238 lbz r6, PACAIRQSOFTMASK(r13) 3239 std r3, _LINK(r1) 3240 std r4, _CTR(r1) 3241 std r5, _XER(r1) 3242 std r6, SOFTE(r1) 3243 ld r2, PACATOC(r13) 3244 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) 3245 std r3, STACK_FRAME_OVERHEAD-16(r1) 3246 3247 /* 3248 * On POWER9 do a minimal restore of the MMU and call C code, 3249 * which will print a message and panic. 3250 * XXX On POWER7 and POWER8, we just spin here since we don't 3251 * know what the other threads are doing (and we don't want to 3252 * coordinate with them) - but at least we now have register state 3253 * in memory that we might be able to look at from another CPU. 3254 */ 3255BEGIN_FTR_SECTION 3256 b . 3257END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 3258 ld r9, HSTATE_KVM_VCPU(r13) 3259 ld r10, VCPU_KVM(r9) 3260 3261 li r0, 0 3262 mtspr SPRN_AMR, r0 3263 mtspr SPRN_IAMR, r0 3264 mtspr SPRN_CIABR, r0 3265 mtspr SPRN_DAWRX, r0 3266 3267BEGIN_MMU_FTR_SECTION 3268 b 4f 3269END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 3270 3271 slbmte r0, r0 3272 slbia 3273 ptesync 3274 ld r8, PACA_SLBSHADOWPTR(r13) 3275 .rept SLB_NUM_BOLTED 3276 li r3, SLBSHADOW_SAVEAREA 3277 LDX_BE r5, r8, r3 3278 addi r3, r3, 8 3279 LDX_BE r6, r8, r3 3280 andis. 
r7, r5, SLB_ESID_V@h 3281 beq 3f 3282 slbmte r6, r5 32833: addi r8, r8, 16 3284 .endr 3285 32864: lwz r7, KVM_HOST_LPID(r10) 3287 mtspr SPRN_LPID, r7 3288 mtspr SPRN_PID, r0 3289 ld r8, KVM_HOST_LPCR(r10) 3290 mtspr SPRN_LPCR, r8 3291 isync 3292 li r0, KVM_GUEST_MODE_NONE 3293 stb r0, HSTATE_IN_GUEST(r13) 3294 3295 /* 3296 * Turn on the MMU and jump to C code 3297 */ 3298 bcl 20, 31, .+4 32995: mflr r3 3300 addi r3, r3, 9f - 5b 3301 li r4, -1 3302 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ 3303 ld r4, PACAKMSR(r13) 3304 mtspr SPRN_SRR0, r3 3305 mtspr SPRN_SRR1, r4 3306 RFI_TO_KERNEL 33079: addi r3, r1, STACK_FRAME_OVERHEAD 3308 bl kvmppc_bad_interrupt 3309 b 9b 3310 3311/* 3312 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 3313 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 3314 * r11 has the guest MSR value (in/out) 3315 * r9 has a vcpu pointer (in) 3316 * r0 is used as a scratch register 3317 */ 3318kvmppc_msr_interrupt: 3319 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 3320 cmpwi r0, 2 /* Check if we are in transactional state.. */ 3321 ld r11, VCPU_INTR_MSR(r9) 3322 bne 1f 3323 /* ... if transactional, change to suspended */ 3324 li r0, 1 33251: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 3326 blr 3327 3328/* 3329 * Load up guest PMU state. R3 points to the vcpu struct. 3330 */ 3331_GLOBAL(kvmhv_load_guest_pmu) 3332EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) 3333 mr r4, r3 3334 mflr r0 3335 li r3, 1 3336 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3337 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3338 isync 3339BEGIN_FTR_SECTION 3340 ld r3, VCPU_MMCR(r4) 3341 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3342 cmpwi r5, MMCR0_PMAO 3343 beql kvmppc_fix_pmao 3344END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3345 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 3346 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 3347 lwz r6, VCPU_PMC + 8(r4) 3348 lwz r7, VCPU_PMC + 12(r4) 3349 lwz r8, VCPU_PMC + 16(r4) 3350 lwz r9, VCPU_PMC + 20(r4) 3351 mtspr SPRN_PMC1, r3 3352 mtspr SPRN_PMC2, r5 3353 mtspr SPRN_PMC3, r6 3354 mtspr SPRN_PMC4, r7 3355 mtspr SPRN_PMC5, r8 3356 mtspr SPRN_PMC6, r9 3357 ld r3, VCPU_MMCR(r4) 3358 ld r5, VCPU_MMCR + 8(r4) 3359 ld r6, VCPU_MMCR + 16(r4) 3360 ld r7, VCPU_SIAR(r4) 3361 ld r8, VCPU_SDAR(r4) 3362 mtspr SPRN_MMCR1, r5 3363 mtspr SPRN_MMCRA, r6 3364 mtspr SPRN_SIAR, r7 3365 mtspr SPRN_SDAR, r8 3366BEGIN_FTR_SECTION 3367 ld r5, VCPU_MMCR + 24(r4) 3368 ld r6, VCPU_SIER(r4) 3369 mtspr SPRN_MMCR2, r5 3370 mtspr SPRN_SIER, r6 3371BEGIN_FTR_SECTION_NESTED(96) 3372 lwz r7, VCPU_PMC + 24(r4) 3373 lwz r8, VCPU_PMC + 28(r4) 3374 ld r9, VCPU_MMCR + 32(r4) 3375 mtspr SPRN_SPMC1, r7 3376 mtspr SPRN_SPMC2, r8 3377 mtspr SPRN_MMCRS, r9 3378END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3379END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3380 mtspr SPRN_MMCR0, r3 3381 isync 3382 mtlr r0 3383 blr 3384 3385/* 3386 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. 3387 */ 3388_GLOBAL(kvmhv_load_host_pmu) 3389EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) 3390 mflr r0 3391 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ 3392 cmpwi r4, 0 3393 beq 23f /* skip if not */ 3394BEGIN_FTR_SECTION 3395 ld r3, HSTATE_MMCR0(r13) 3396 andi. 
r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't cause
	 * a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0		/* did they ask for PMU stuff to be saved?
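r8 is the full-save flag passed in as r4: it is non-zero when the guest has marked the PMU as in use in its VPA, in which case all the PMU SPRs are saved below; if it is zero, only a frozen MMCR0 value is recorded.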
*/ 3474 bne 21f 3475 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 3476 b 22f 347721: mfspr r5, SPRN_MMCR1 3478 mfspr r7, SPRN_SIAR 3479 mfspr r8, SPRN_SDAR 3480 std r4, VCPU_MMCR(r9) 3481 std r5, VCPU_MMCR + 8(r9) 3482 std r6, VCPU_MMCR + 16(r9) 3483BEGIN_FTR_SECTION 3484 std r10, VCPU_MMCR + 24(r9) 3485END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3486 std r7, VCPU_SIAR(r9) 3487 std r8, VCPU_SDAR(r9) 3488 mfspr r3, SPRN_PMC1 3489 mfspr r4, SPRN_PMC2 3490 mfspr r5, SPRN_PMC3 3491 mfspr r6, SPRN_PMC4 3492 mfspr r7, SPRN_PMC5 3493 mfspr r8, SPRN_PMC6 3494 stw r3, VCPU_PMC(r9) 3495 stw r4, VCPU_PMC + 4(r9) 3496 stw r5, VCPU_PMC + 8(r9) 3497 stw r6, VCPU_PMC + 12(r9) 3498 stw r7, VCPU_PMC + 16(r9) 3499 stw r8, VCPU_PMC + 20(r9) 3500BEGIN_FTR_SECTION 3501 mfspr r5, SPRN_SIER 3502 std r5, VCPU_SIER(r9) 3503BEGIN_FTR_SECTION_NESTED(96) 3504 mfspr r6, SPRN_SPMC1 3505 mfspr r7, SPRN_SPMC2 3506 mfspr r8, SPRN_MMCRS 3507 stw r6, VCPU_PMC + 24(r9) 3508 stw r7, VCPU_PMC + 28(r9) 3509 std r8, VCPU_MMCR + 32(r9) 3510 lis r4, 0x8000 3511 mtspr SPRN_MMCRS, r4 3512END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3513END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 351422: blr 3515 3516/* 3517 * This works around a hardware bug on POWER8E processors, where 3518 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 3519 * performance monitor interrupt. Instead, when we need to have 3520 * an interrupt pending, we have to arrange for a counter to overflow. 3521 */ 3522kvmppc_fix_pmao: 3523 li r3, 0 3524 mtspr SPRN_MMCR2, r3 3525 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h 3526 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN 3527 mtspr SPRN_MMCR0, r3 3528 lis r3, 0x7fff 3529 ori r3, r3, 0xffff 3530 mtspr SPRN_PMC6, r3 3531 isync 3532 blr 3533 3534#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 3535/* 3536 * Start timing an activity 3537 * r3 = pointer to time accumulation struct, r4 = vcpu 3538 */ 3539kvmhv_start_timing: 3540 ld r5, HSTATE_KVM_VCORE(r13) 3541 ld r6, VCORE_TB_OFFSET_APPL(r5) 3542 mftb r5 3543 subf r5, r6, r5 /* subtract current timebase offset */ 3544 std r3, VCPU_CUR_ACTIVITY(r4) 3545 std r5, VCPU_ACTIVITY_START(r4) 3546 blr 3547 3548/* 3549 * Accumulate time to one activity and start another. 3550 * r3 = pointer to new time accumulation struct, r4 = vcpu 3551 */ 3552kvmhv_accumulate_time: 3553 ld r5, HSTATE_KVM_VCORE(r13) 3554 ld r8, VCORE_TB_OFFSET_APPL(r5) 3555 ld r5, VCPU_CUR_ACTIVITY(r4) 3556 ld r6, VCPU_ACTIVITY_START(r4) 3557 std r3, VCPU_CUR_ACTIVITY(r4) 3558 mftb r7 3559 subf r7, r8, r7 /* subtract current timebase offset */ 3560 std r7, VCPU_ACTIVITY_START(r4) 3561 cmpdi r5, 0 3562 beqlr 3563 subf r3, r6, r7 3564 ld r8, TAS_SEQCOUNT(r5) 3565 cmpdi r8, 0 3566 addi r8, r8, 1 3567 std r8, TAS_SEQCOUNT(r5) 3568 lwsync 3569 ld r7, TAS_TOTAL(r5) 3570 add r7, r7, r3 3571 std r7, TAS_TOTAL(r5) 3572 ld r6, TAS_MIN(r5) 3573 ld r7, TAS_MAX(r5) 3574 beq 3f 3575 cmpd r3, r6 3576 bge 1f 35773: std r3, TAS_MIN(r5) 35781: cmpd r3, r7 3579 ble 2f 3580 std r3, TAS_MAX(r5) 35812: lwsync 3582 addi r8, r8, 1 3583 std r8, TAS_SEQCOUNT(r5) 3584 blr 3585#endif 3586
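/*
 * A note on kvmhv_accumulate_time above: the TAS_SEQCOUNT increments
 * on either side of the total/min/max updates leave the count odd
 * while an update is in flight, seqlock style, so a consumer of the
 * accumulators can (presumably) detect a torn read and retry.
 */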