/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR0	(SFS-56)
#define STACK_SLOT_DAWRX0	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
#define STACK_SLOT_FSCR		(SFS-96)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
119 */ 120 ld r8, 112+PPC_LR_STKOFF(r1) 121 addi r1, r1, 112 122 ld r7, HSTATE_HOST_MSR(r13) 123 124 /* Return the trap number on this thread as the return value */ 125 mr r3, r12 126 127 /* RFI into the highmem handler */ 128 mfmsr r6 129 li r0, MSR_RI 130 andc r6, r6, r0 131 mtmsrd r6, 1 /* Clear RI in MSR */ 132 mtsrr0 r8 133 mtsrr1 r7 134 RFI_TO_KERNEL 135 136kvmppc_primary_no_guest: 137 /* We handle this much like a ceded vcpu */ 138 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ 139 /* HDEC may be larger than DEC for arch >= v3.00, but since the */ 140 /* HDEC value came from DEC in the first place, it will fit */ 141 mfspr r3, SPRN_HDEC 142 mtspr SPRN_DEC, r3 143 /* 144 * Make sure the primary has finished the MMU switch. 145 * We should never get here on a secondary thread, but 146 * check it for robustness' sake. 147 */ 148 ld r5, HSTATE_KVM_VCORE(r13) 14965: lbz r0, VCORE_IN_GUEST(r5) 150 cmpwi r0, 0 151 beq 65b 152 /* Set LPCR. */ 153 ld r8,VCORE_LPCR(r5) 154 mtspr SPRN_LPCR,r8 155 isync 156 /* set our bit in napping_threads */ 157 ld r5, HSTATE_KVM_VCORE(r13) 158 lbz r7, HSTATE_PTID(r13) 159 li r0, 1 160 sld r0, r0, r7 161 addi r6, r5, VCORE_NAPPING_THREADS 1621: lwarx r3, 0, r6 163 or r3, r3, r0 164 stwcx. r3, 0, r6 165 bne 1b 166 /* order napping_threads update vs testing entry_exit_map */ 167 isync 168 li r12, 0 169 lwz r7, VCORE_ENTRY_EXIT(r5) 170 cmpwi r7, 0x100 171 bge kvm_novcpu_exit /* another thread already exiting */ 172 li r3, NAPPING_NOVCPU 173 stb r3, HSTATE_NAPPING(r13) 174 175 li r3, 0 /* Don't wake on privileged (OS) doorbell */ 176 b kvm_do_nap 177 178/* 179 * kvm_novcpu_wakeup 180 * Entered from kvm_start_guest if kvm_hstate.napping is set 181 * to NAPPING_NOVCPU 182 * r2 = kernel TOC 183 * r13 = paca 184 */ 185kvm_novcpu_wakeup: 186 ld r1, HSTATE_HOST_R1(r13) 187 ld r5, HSTATE_KVM_VCORE(r13) 188 li r0, 0 189 stb r0, HSTATE_NAPPING(r13) 190 191 /* check the wake reason */ 192 bl kvmppc_check_wake_reason 193 194 /* 195 * Restore volatile registers since we could have called 196 * a C routine in kvmppc_check_wake_reason. 197 * r5 = VCORE 198 */ 199 ld r5, HSTATE_KVM_VCORE(r13) 200 201 /* see if any other thread is already exiting */ 202 lwz r0, VCORE_ENTRY_EXIT(r5) 203 cmpwi r0, 0x100 204 bge kvm_novcpu_exit 205 206 /* clear our bit in napping_threads */ 207 lbz r7, HSTATE_PTID(r13) 208 li r0, 1 209 sld r0, r0, r7 210 addi r6, r5, VCORE_NAPPING_THREADS 2114: lwarx r7, 0, r6 212 andc r7, r7, r0 213 stwcx. r7, 0, r6 214 bne 4b 215 216 /* See if the wake reason means we need to exit */ 217 cmpdi r3, 0 218 bge kvm_novcpu_exit 219 220 /* See if our timeslice has expired (HDEC is negative) */ 221 mfspr r0, SPRN_HDEC 222 extsw r0, r0 223 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 224 cmpdi r0, 0 225 blt kvm_novcpu_exit 226 227 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 228 ld r4, HSTATE_KVM_VCPU(r13) 229 cmpdi r4, 0 230 beq kvmppc_primary_no_guest 231 232#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 233 addi r3, r4, VCPU_TB_RMENTRY 234 bl kvmhv_start_timing 235#endif 236 b kvmppc_got_guest 237 238kvm_novcpu_exit: 239#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 240 ld r4, HSTATE_KVM_VCPU(r13) 241 cmpdi r4, 0 242 beq 13f 243 addi r3, r4, VCPU_TB_RMEXIT 244 bl kvmhv_accumulate_time 245#endif 24613: mr r3, r12 247 stw r12, STACK_SLOT_TRAP(r1) 248 bl kvmhv_commence_exit 249 nop 250 b kvmhv_switch_to_host 251 252/* 253 * We come in here when wakened from Linux offline idle code. 
254 * Relocation is off 255 * r3 contains the SRR1 wakeup value, SRR1 is trashed. 256 */ 257_GLOBAL(idle_kvm_start_guest) 258 ld r4,PACAEMERGSP(r13) 259 mfcr r5 260 mflr r0 261 std r1,0(r4) 262 std r5,8(r4) 263 std r0,16(r4) 264 subi r1,r4,STACK_FRAME_OVERHEAD 265 SAVE_NVGPRS(r1) 266 267 /* 268 * Could avoid this and pass it through in r3. For now, 269 * code expects it to be in SRR1. 270 */ 271 mtspr SPRN_SRR1,r3 272 273 li r0,0 274 stb r0,PACA_FTRACE_ENABLED(r13) 275 276 li r0,KVM_HWTHREAD_IN_KVM 277 stb r0,HSTATE_HWTHREAD_STATE(r13) 278 279 /* kvm cede / napping does not come through here */ 280 lbz r0,HSTATE_NAPPING(r13) 281 twnei r0,0 282 283 b 1f 284 285kvm_unsplit_wakeup: 286 li r0, 0 287 stb r0, HSTATE_NAPPING(r13) 288 2891: 290 291 /* 292 * We weren't napping due to cede, so this must be a secondary 293 * thread being woken up to run a guest, or being woken up due 294 * to a stray IPI. (Or due to some machine check or hypervisor 295 * maintenance interrupt while the core is in KVM.) 296 */ 297 298 /* Check the wake reason in SRR1 to see why we got here */ 299 bl kvmppc_check_wake_reason 300 /* 301 * kvmppc_check_wake_reason could invoke a C routine, but we 302 * have no volatile registers to restore when we return. 303 */ 304 305 cmpdi r3, 0 306 bge kvm_no_guest 307 308 /* get vcore pointer, NULL if we have nothing to run */ 309 ld r5,HSTATE_KVM_VCORE(r13) 310 cmpdi r5,0 311 /* if we have no vcore to run, go back to sleep */ 312 beq kvm_no_guest 313 314kvm_secondary_got_guest: 315 316 /* Set HSTATE_DSCR(r13) to something sensible */ 317 ld r6, PACA_DSCR_DEFAULT(r13) 318 std r6, HSTATE_DSCR(r13) 319 320 /* On thread 0 of a subcore, set HDEC to max */ 321 lbz r4, HSTATE_PTID(r13) 322 cmpwi r4, 0 323 bne 63f 324 lis r6,0x7fff /* MAX_INT@h */ 325 mtspr SPRN_HDEC, r6 326 /* and set per-LPAR registers, if doing dynamic micro-threading */ 327 ld r6, HSTATE_SPLIT_MODE(r13) 328 cmpdi r6, 0 329 beq 63f 330 ld r0, KVM_SPLIT_RPR(r6) 331 mtspr SPRN_RPR, r0 332 ld r0, KVM_SPLIT_PMMAR(r6) 333 mtspr SPRN_PMMAR, r0 334 ld r0, KVM_SPLIT_LDBAR(r6) 335 mtspr SPRN_LDBAR, r0 336 isync 33763: 338 /* Order load of vcpu after load of vcore */ 339 lwsync 340 ld r4, HSTATE_KVM_VCPU(r13) 341 bl kvmppc_hv_entry 342 343 /* Back from the guest, go back to nap */ 344 /* Clear our vcpu and vcore pointers so we don't come back in early */ 345 li r0, 0 346 std r0, HSTATE_KVM_VCPU(r13) 347 /* 348 * Once we clear HSTATE_KVM_VCORE(r13), the code in 349 * kvmppc_run_core() is going to assume that all our vcpu 350 * state is visible in memory. This lwsync makes sure 351 * that that is true. 352 */ 353 lwsync 354 std r0, HSTATE_KVM_VCORE(r13) 355 356 /* 357 * All secondaries exiting guest will fall through this path. 358 * Before proceeding, just check for HMI interrupt and 359 * invoke opal hmi handler. By now we are sure that the 360 * primary thread on this core/subcore has already made partition 361 * switch/TB resync and we are good to call opal hmi handler. 362 */ 363 cmpwi r12, BOOK3S_INTERRUPT_HMI 364 bne kvm_no_guest 365 366 li r3,0 /* NULL argument */ 367 bl hmi_exception_realmode 368/* 369 * At this point we have finished executing in the guest. 370 * We need to wait for hwthread_req to become zero, since 371 * we may not turn on the MMU while hwthread_req is non-zero. 372 * While waiting we also need to check if we get given a vcpu to run. 
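 * We advertise KVM_HWTHREAD_IN_KERNEL first and only then re-read
 * hwthread_req after a full barrier (the sync below), so a host
 * request that races with our state change is not missed.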
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr	r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi	r1, r1, STACK_FRAME_OVERHEAD
	ld	r0, 16(r1)
	ld	r5, 8(r1)
	ld	r1, 0(r1)
	mtlr	r0
	mtcr	r5
	blr

53:
	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
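	 * (The sync that follows pairs with the smp_mb() the host
	 *  executes before setting split_info.do_nap, so the vcore
	 *  pointer we test here is up to date.)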
450 */ 451 sync /* matches smp_mb() before setting split_info.do_nap */ 452 ld r0, HSTATE_KVM_VCORE(r13) 453 cmpdi r0, 0 454 bne kvm_no_guest 455 /* clear any pending message */ 456BEGIN_FTR_SECTION 457 lis r6, (PPC_DBELL_SERVER << (63-36))@h 458 PPC_MSGCLR(6) 459END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 460 /* Set kvm_split_mode.napped[tid] = 1 */ 461 ld r3, HSTATE_SPLIT_MODE(r13) 462 li r0, 1 463 lhz r4, PACAPACAINDEX(r13) 464 clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */ 465 addi r4, r4, KVM_SPLIT_NAPPED 466 stbx r0, r3, r4 467 /* Check the do_nap flag again after setting napped[] */ 468 sync 469 lbz r0, KVM_SPLIT_DO_NAP(r3) 470 cmpwi r0, 0 471 beq 57f 472 li r3, NAPPING_UNSPLIT 473 stb r3, HSTATE_NAPPING(r13) 474 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 475 mfspr r5, SPRN_LPCR 476 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) 477 b kvm_nap_sequence 478 47957: li r0, 0 480 stbx r0, r3, r4 481 b kvm_no_guest 482 483/****************************************************************************** 484 * * 485 * Entry code * 486 * * 487 *****************************************************************************/ 488 489.global kvmppc_hv_entry 490kvmppc_hv_entry: 491 492 /* Required state: 493 * 494 * R4 = vcpu pointer (or NULL) 495 * MSR = ~IR|DR 496 * R13 = PACA 497 * R1 = host R1 498 * R2 = TOC 499 * all other volatile GPRS = free 500 * Does not preserve non-volatile GPRs or CR fields 501 */ 502 mflr r0 503 std r0, PPC_LR_STKOFF(r1) 504 stdu r1, -SFS(r1) 505 506 /* Save R1 in the PACA */ 507 std r1, HSTATE_HOST_R1(r13) 508 509 li r6, KVM_GUEST_MODE_HOST_HV 510 stb r6, HSTATE_IN_GUEST(r13) 511 512#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 513 /* Store initial timestamp */ 514 cmpdi r4, 0 515 beq 1f 516 addi r3, r4, VCPU_TB_RMENTRY 517 bl kvmhv_start_timing 5181: 519#endif 520 521 ld r5, HSTATE_KVM_VCORE(r13) 522 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ 523 524 /* 525 * POWER7/POWER8 host -> guest partition switch code. 526 * We don't have to lock against concurrent tlbies, 527 * but we do have to coordinate across hardware threads. 528 */ 529 /* Set bit in entry map iff exit map is zero. */ 530 li r7, 1 531 lbz r6, HSTATE_PTID(r13) 532 sld r7, r7, r6 533 addi r8, r5, VCORE_ENTRY_EXIT 53421: lwarx r3, 0, r8 535 cmpwi r3, 0x100 /* any threads starting to exit? */ 536 bge secondary_too_late /* if so we're too late to the party */ 537 or r3, r3, r7 538 stwcx. r3, 0, r8 539 bne 21b 540 541 /* Primary thread switches to guest partition. */ 542 cmpwi r6,0 543 bne 10f 544 545 lwz r7,KVM_LPID(r9) 546 ld r6,KVM_SDR1(r9) 547 li r0,LPID_RSVD /* switch to reserved LPID */ 548 mtspr SPRN_LPID,r0 549 ptesync 550 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 551 mtspr SPRN_LPID,r7 552 isync 553 554 /* See if we need to flush the TLB. 
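	 * (kvmppc_check_need_tlb_flush() decides, from the per-physical-CPU
	 *  flush state kept for this guest, whether stale translations for
	 *  this LPID must be invalidated before we enter it.)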
*/ 555 mr r3, r9 /* kvm pointer */ 556 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ 557 li r5, 0 /* nested vcpu pointer */ 558 bl kvmppc_check_need_tlb_flush 559 nop 560 ld r5, HSTATE_KVM_VCORE(r13) 561 562 /* Add timebase offset onto timebase */ 56322: ld r8,VCORE_TB_OFFSET(r5) 564 cmpdi r8,0 565 beq 37f 566 std r8, VCORE_TB_OFFSET_APPL(r5) 567 mftb r6 /* current host timebase */ 568 add r8,r8,r6 569 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 570 mftb r7 /* check if lower 24 bits overflowed */ 571 clrldi r6,r6,40 572 clrldi r7,r7,40 573 cmpld r7,r6 574 bge 37f 575 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 576 mtspr SPRN_TBU40,r8 577 578 /* Load guest PCR value to select appropriate compat mode */ 57937: ld r7, VCORE_PCR(r5) 580 LOAD_REG_IMMEDIATE(r6, PCR_MASK) 581 cmpld r7, r6 582 beq 38f 583 or r7, r7, r6 584 mtspr SPRN_PCR, r7 58538: 586 587BEGIN_FTR_SECTION 588 /* DPDES and VTB are shared between threads */ 589 ld r8, VCORE_DPDES(r5) 590 ld r7, VCORE_VTB(r5) 591 mtspr SPRN_DPDES, r8 592 mtspr SPRN_VTB, r7 593END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 594 595 /* Mark the subcore state as inside guest */ 596 bl kvmppc_subcore_enter_guest 597 nop 598 ld r5, HSTATE_KVM_VCORE(r13) 599 ld r4, HSTATE_KVM_VCPU(r13) 600 li r0,1 601 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 602 603 /* Do we have a guest vcpu to run? */ 60410: cmpdi r4, 0 605 beq kvmppc_primary_no_guest 606kvmppc_got_guest: 607 /* Increment yield count if they have a VPA */ 608 ld r3, VCPU_VPA(r4) 609 cmpdi r3, 0 610 beq 25f 611 li r6, LPPACA_YIELDCOUNT 612 LWZX_BE r5, r3, r6 613 addi r5, r5, 1 614 STWX_BE r5, r3, r6 615 li r6, 1 616 stb r6, VCPU_VPA_DIRTY(r4) 61725: 618 619 /* Save purr/spurr */ 620 mfspr r5,SPRN_PURR 621 mfspr r6,SPRN_SPURR 622 std r5,HSTATE_PURR(r13) 623 std r6,HSTATE_SPURR(r13) 624 ld r7,VCPU_PURR(r4) 625 ld r8,VCPU_SPURR(r4) 626 mtspr SPRN_PURR,r7 627 mtspr SPRN_SPURR,r8 628 629 /* Save host values of some registers */ 630BEGIN_FTR_SECTION 631 mfspr r5, SPRN_CIABR 632 mfspr r6, SPRN_DAWR0 633 mfspr r7, SPRN_DAWRX0 634 mfspr r8, SPRN_IAMR 635 std r5, STACK_SLOT_CIABR(r1) 636 std r6, STACK_SLOT_DAWR0(r1) 637 std r7, STACK_SLOT_DAWRX0(r1) 638 std r8, STACK_SLOT_IAMR(r1) 639 mfspr r5, SPRN_FSCR 640 std r5, STACK_SLOT_FSCR(r1) 641END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 642 643 mfspr r5, SPRN_AMR 644 std r5, STACK_SLOT_AMR(r1) 645 mfspr r6, SPRN_UAMOR 646 std r6, STACK_SLOT_UAMOR(r1) 647 648BEGIN_FTR_SECTION 649 /* Set partition DABR */ 650 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 651 lwz r5,VCPU_DABRX(r4) 652 ld r6,VCPU_DABR(r4) 653 mtspr SPRN_DABRX,r5 654 mtspr SPRN_DABR,r6 655 isync 656END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 657 658#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 659BEGIN_FTR_SECTION 660 b 91f 661END_FTR_SECTION_IFCLR(CPU_FTR_TM) 662 /* 663 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 664 */ 665 mr r3, r4 666 ld r4, VCPU_MSR(r3) 667 li r5, 0 /* don't preserve non-vol regs */ 668 bl kvmppc_restore_tm_hv 669 nop 670 ld r4, HSTATE_KVM_VCPU(r13) 67191: 672#endif 673 674 /* Load guest PMU registers; r4 = vcpu pointer here */ 675 mr r3, r4 676 bl kvmhv_load_guest_pmu 677 678 /* Load up FP, VMX and VSX registers */ 679 ld r4, HSTATE_KVM_VCPU(r13) 680 bl kvmppc_load_fp 681 682 ld r14, VCPU_GPR(R14)(r4) 683 ld r15, VCPU_GPR(R15)(r4) 684 ld r16, VCPU_GPR(R16)(r4) 685 ld r17, VCPU_GPR(R17)(r4) 686 ld r18, VCPU_GPR(R18)(r4) 687 ld r19, VCPU_GPR(R19)(r4) 688 ld r20, VCPU_GPR(R20)(r4) 689 ld r21, VCPU_GPR(R21)(r4) 690 ld r22, 
VCPU_GPR(R22)(r4) 691 ld r23, VCPU_GPR(R23)(r4) 692 ld r24, VCPU_GPR(R24)(r4) 693 ld r25, VCPU_GPR(R25)(r4) 694 ld r26, VCPU_GPR(R26)(r4) 695 ld r27, VCPU_GPR(R27)(r4) 696 ld r28, VCPU_GPR(R28)(r4) 697 ld r29, VCPU_GPR(R29)(r4) 698 ld r30, VCPU_GPR(R30)(r4) 699 ld r31, VCPU_GPR(R31)(r4) 700 701 /* Switch DSCR to guest value */ 702 ld r5, VCPU_DSCR(r4) 703 mtspr SPRN_DSCR, r5 704 705BEGIN_FTR_SECTION 706 /* Skip next section on POWER7 */ 707 b 8f 708END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 709 /* Load up POWER8-specific registers */ 710 ld r5, VCPU_IAMR(r4) 711 lwz r6, VCPU_PSPB(r4) 712 ld r7, VCPU_FSCR(r4) 713 mtspr SPRN_IAMR, r5 714 mtspr SPRN_PSPB, r6 715 mtspr SPRN_FSCR, r7 716 /* 717 * Handle broken DAWR case by not writing it. This means we 718 * can still store the DAWR register for migration. 719 */ 720 LOAD_REG_ADDR(r5, dawr_force_enable) 721 lbz r5, 0(r5) 722 cmpdi r5, 0 723 beq 1f 724 ld r5, VCPU_DAWR0(r4) 725 ld r6, VCPU_DAWRX0(r4) 726 mtspr SPRN_DAWR0, r5 727 mtspr SPRN_DAWRX0, r6 7281: 729 ld r7, VCPU_CIABR(r4) 730 ld r8, VCPU_TAR(r4) 731 mtspr SPRN_CIABR, r7 732 mtspr SPRN_TAR, r8 733 ld r5, VCPU_IC(r4) 734 ld r8, VCPU_EBBHR(r4) 735 mtspr SPRN_IC, r5 736 mtspr SPRN_EBBHR, r8 737 ld r5, VCPU_EBBRR(r4) 738 ld r6, VCPU_BESCR(r4) 739 lwz r7, VCPU_GUEST_PID(r4) 740 ld r8, VCPU_WORT(r4) 741 mtspr SPRN_EBBRR, r5 742 mtspr SPRN_BESCR, r6 743 mtspr SPRN_PID, r7 744 mtspr SPRN_WORT, r8 745 /* POWER8-only registers */ 746 ld r5, VCPU_TCSCR(r4) 747 ld r6, VCPU_ACOP(r4) 748 ld r7, VCPU_CSIGR(r4) 749 ld r8, VCPU_TACR(r4) 750 mtspr SPRN_TCSCR, r5 751 mtspr SPRN_ACOP, r6 752 mtspr SPRN_CSIGR, r7 753 mtspr SPRN_TACR, r8 754 nop 7558: 756 757 ld r5, VCPU_SPRG0(r4) 758 ld r6, VCPU_SPRG1(r4) 759 ld r7, VCPU_SPRG2(r4) 760 ld r8, VCPU_SPRG3(r4) 761 mtspr SPRN_SPRG0, r5 762 mtspr SPRN_SPRG1, r6 763 mtspr SPRN_SPRG2, r7 764 mtspr SPRN_SPRG3, r8 765 766 /* Load up DAR and DSISR */ 767 ld r5, VCPU_DAR(r4) 768 lwz r6, VCPU_DSISR(r4) 769 mtspr SPRN_DAR, r5 770 mtspr SPRN_DSISR, r6 771 772 /* Restore AMR and UAMOR, set AMOR to all 1s */ 773 ld r5,VCPU_AMR(r4) 774 ld r6,VCPU_UAMOR(r4) 775 li r7,-1 776 mtspr SPRN_AMR,r5 777 mtspr SPRN_UAMOR,r6 778 mtspr SPRN_AMOR,r7 779 780 /* Restore state of CTRL run bit; assume 1 on entry */ 781 lwz r5,VCPU_CTRL(r4) 782 andi. r5,r5,1 783 bne 4f 784 mfspr r6,SPRN_CTRLF 785 clrrdi r6,r6,1 786 mtspr SPRN_CTRLT,r6 7874: 788 /* Secondary threads wait for primary to have done partition switch */ 789 ld r5, HSTATE_KVM_VCORE(r13) 790 lbz r6, HSTATE_PTID(r13) 791 cmpwi r6, 0 792 beq 21f 793 lbz r0, VCORE_IN_GUEST(r5) 794 cmpwi r0, 0 795 bne 21f 796 HMT_LOW 79720: lwz r3, VCORE_ENTRY_EXIT(r5) 798 cmpwi r3, 0x100 799 bge no_switch_exit 800 lbz r0, VCORE_IN_GUEST(r5) 801 cmpwi r0, 0 802 beq 20b 803 HMT_MEDIUM 80421: 805 /* Set LPCR. */ 806 ld r8,VCORE_LPCR(r5) 807 mtspr SPRN_LPCR,r8 808 isync 809 810 /* 811 * Set the decrementer to the guest decrementer. 812 */ 813 ld r8,VCPU_DEC_EXPIRES(r4) 814 /* r8 is a host timebase value here, convert to guest TB */ 815 ld r5,HSTATE_KVM_VCORE(r13) 816 ld r6,VCORE_TB_OFFSET_APPL(r5) 817 add r8,r8,r6 818 mftb r7 819 subf r3,r7,r8 820 mtspr SPRN_DEC,r3 821 822 /* Check if HDEC expires soon */ 823 mfspr r3, SPRN_HDEC 824 extsw r3, r3 825 cmpdi r3, 512 /* 1 microsecond */ 826 blt hdec_soon 827 828 /* Clear out and reload the SLB */ 829 li r6, 0 830 slbmte r6, r6 831 PPC_SLBIA(6) 832 ptesync 833 834 /* Load up guest SLB entries (N.B. 
slb_max will be 0 for radix) */ 835 lwz r5,VCPU_SLB_MAX(r4) 836 cmpwi r5,0 837 beq 9f 838 mtctr r5 839 addi r6,r4,VCPU_SLB 8401: ld r8,VCPU_SLB_E(r6) 841 ld r9,VCPU_SLB_V(r6) 842 slbmte r9,r8 843 addi r6,r6,VCPU_SLB_SIZE 844 bdnz 1b 8459: 846 847deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ 848 /* Check if we can deliver an external or decrementer interrupt now */ 849 ld r0, VCPU_PENDING_EXC(r4) 850 cmpdi r0, 0 851 beq 71f 852 mr r3, r4 853 bl kvmppc_guest_entry_inject_int 854 ld r4, HSTATE_KVM_VCPU(r13) 85571: 856 ld r6, VCPU_SRR0(r4) 857 ld r7, VCPU_SRR1(r4) 858 mtspr SPRN_SRR0, r6 859 mtspr SPRN_SRR1, r7 860 861 ld r10, VCPU_PC(r4) 862 ld r11, VCPU_MSR(r4) 863 /* r11 = vcpu->arch.msr & ~MSR_HV */ 864 rldicl r11, r11, 63 - MSR_HV_LG, 1 865 rotldi r11, r11, 1 + MSR_HV_LG 866 ori r11, r11, MSR_ME 867 868 ld r6, VCPU_CTR(r4) 869 ld r7, VCPU_XER(r4) 870 mtctr r6 871 mtxer r7 872 873/* 874 * Required state: 875 * R4 = vcpu 876 * R10: value for HSRR0 877 * R11: value for HSRR1 878 * R13 = PACA 879 */ 880fast_guest_return: 881 li r0,0 882 stb r0,VCPU_CEDED(r4) /* cancel cede */ 883 mtspr SPRN_HSRR0,r10 884 mtspr SPRN_HSRR1,r11 885 886 /* Activate guest mode, so faults get handled by KVM */ 887 li r9, KVM_GUEST_MODE_GUEST_HV 888 stb r9, HSTATE_IN_GUEST(r13) 889 890#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 891 /* Accumulate timing */ 892 addi r3, r4, VCPU_TB_GUEST 893 bl kvmhv_accumulate_time 894#endif 895 896 /* Enter guest */ 897 898BEGIN_FTR_SECTION 899 ld r5, VCPU_CFAR(r4) 900 mtspr SPRN_CFAR, r5 901END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 902BEGIN_FTR_SECTION 903 ld r0, VCPU_PPR(r4) 904END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 905 906 ld r5, VCPU_LR(r4) 907 mtlr r5 908 909 ld r1, VCPU_GPR(R1)(r4) 910 ld r5, VCPU_GPR(R5)(r4) 911 ld r8, VCPU_GPR(R8)(r4) 912 ld r9, VCPU_GPR(R9)(r4) 913 ld r10, VCPU_GPR(R10)(r4) 914 ld r11, VCPU_GPR(R11)(r4) 915 ld r12, VCPU_GPR(R12)(r4) 916 ld r13, VCPU_GPR(R13)(r4) 917 918BEGIN_FTR_SECTION 919 mtspr SPRN_PPR, r0 920END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 921 922 ld r6, VCPU_GPR(R6)(r4) 923 ld r7, VCPU_GPR(R7)(r4) 924 925 ld r0, VCPU_CR(r4) 926 mtcr r0 927 928 ld r0, VCPU_GPR(R0)(r4) 929 ld r2, VCPU_GPR(R2)(r4) 930 ld r3, VCPU_GPR(R3)(r4) 931 ld r4, VCPU_GPR(R4)(r4) 932 HRFI_TO_GUEST 933 b . 934 935secondary_too_late: 936 li r12, 0 937 stw r12, STACK_SLOT_TRAP(r1) 938 cmpdi r4, 0 939 beq 11f 940 stw r12, VCPU_TRAP(r4) 941#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 942 addi r3, r4, VCPU_TB_RMEXIT 943 bl kvmhv_accumulate_time 944#endif 94511: b kvmhv_switch_to_host 946 947no_switch_exit: 948 HMT_MEDIUM 949 li r12, 0 950 b 12f 951hdec_soon: 952 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 95312: stw r12, VCPU_TRAP(r4) 954 mr r9, r4 955#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 956 addi r3, r4, VCPU_TB_RMEXIT 957 bl kvmhv_accumulate_time 958#endif 959 b guest_bypass 960 961/****************************************************************************** 962 * * 963 * Exit code * 964 * * 965 *****************************************************************************/ 966 967/* 968 * We come here from the first-level interrupt handlers. 
969 */ 970 .globl kvmppc_interrupt_hv 971kvmppc_interrupt_hv: 972 /* 973 * Register contents: 974 * R9 = HSTATE_IN_GUEST 975 * R12 = (guest CR << 32) | interrupt vector 976 * R13 = PACA 977 * guest R12 saved in shadow VCPU SCRATCH0 978 * guest R13 saved in SPRN_SCRATCH0 979 * guest R9 saved in HSTATE_SCRATCH2 980 */ 981 /* We're now back in the host but in guest MMU context */ 982 cmpwi r9,KVM_GUEST_MODE_HOST_HV 983 beq kvmppc_bad_host_intr 984 li r9, KVM_GUEST_MODE_HOST_HV 985 stb r9, HSTATE_IN_GUEST(r13) 986 987 ld r9, HSTATE_KVM_VCPU(r13) 988 989 /* Save registers */ 990 991 std r0, VCPU_GPR(R0)(r9) 992 std r1, VCPU_GPR(R1)(r9) 993 std r2, VCPU_GPR(R2)(r9) 994 std r3, VCPU_GPR(R3)(r9) 995 std r4, VCPU_GPR(R4)(r9) 996 std r5, VCPU_GPR(R5)(r9) 997 std r6, VCPU_GPR(R6)(r9) 998 std r7, VCPU_GPR(R7)(r9) 999 std r8, VCPU_GPR(R8)(r9) 1000 ld r0, HSTATE_SCRATCH2(r13) 1001 std r0, VCPU_GPR(R9)(r9) 1002 std r10, VCPU_GPR(R10)(r9) 1003 std r11, VCPU_GPR(R11)(r9) 1004 ld r3, HSTATE_SCRATCH0(r13) 1005 std r3, VCPU_GPR(R12)(r9) 1006 /* CR is in the high half of r12 */ 1007 srdi r4, r12, 32 1008 std r4, VCPU_CR(r9) 1009BEGIN_FTR_SECTION 1010 ld r3, HSTATE_CFAR(r13) 1011 std r3, VCPU_CFAR(r9) 1012END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1013BEGIN_FTR_SECTION 1014 ld r4, HSTATE_PPR(r13) 1015 std r4, VCPU_PPR(r9) 1016END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1017 1018 /* Restore R1/R2 so we can handle faults */ 1019 ld r1, HSTATE_HOST_R1(r13) 1020 ld r2, PACATOC(r13) 1021 1022 mfspr r10, SPRN_SRR0 1023 mfspr r11, SPRN_SRR1 1024 std r10, VCPU_SRR0(r9) 1025 std r11, VCPU_SRR1(r9) 1026 /* trap is in the low half of r12, clear CR from the high half */ 1027 clrldi r12, r12, 32 1028 andi. r0, r12, 2 /* need to read HSRR0/1? */ 1029 beq 1f 1030 mfspr r10, SPRN_HSRR0 1031 mfspr r11, SPRN_HSRR1 1032 clrrdi r12, r12, 2 10331: std r10, VCPU_PC(r9) 1034 std r11, VCPU_MSR(r9) 1035 1036 GET_SCRATCH0(r3) 1037 mflr r4 1038 std r3, VCPU_GPR(R13)(r9) 1039 std r4, VCPU_LR(r9) 1040 1041 stw r12,VCPU_TRAP(r9) 1042 1043 /* 1044 * Now that we have saved away SRR0/1 and HSRR0/1, 1045 * interrupts are recoverable in principle, so set MSR_RI. 1046 * This becomes important for relocation-on interrupts from 1047 * the guest, which we can get in radix mode on POWER9. 
1048 */ 1049 li r0, MSR_RI 1050 mtmsrd r0, 1 1051 1052#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1053 addi r3, r9, VCPU_TB_RMINTR 1054 mr r4, r9 1055 bl kvmhv_accumulate_time 1056 ld r5, VCPU_GPR(R5)(r9) 1057 ld r6, VCPU_GPR(R6)(r9) 1058 ld r7, VCPU_GPR(R7)(r9) 1059 ld r8, VCPU_GPR(R8)(r9) 1060#endif 1061 1062 /* Save HEIR (HV emulation assist reg) in emul_inst 1063 if this is an HEI (HV emulation interrupt, e40) */ 1064 li r3,KVM_INST_FETCH_FAILED 1065 stw r3,VCPU_LAST_INST(r9) 1066 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1067 bne 11f 1068 mfspr r3,SPRN_HEIR 106911: stw r3,VCPU_HEIR(r9) 1070 1071 /* these are volatile across C function calls */ 1072 mfctr r3 1073 mfxer r4 1074 std r3, VCPU_CTR(r9) 1075 std r4, VCPU_XER(r9) 1076 1077 /* Save more register state */ 1078 mfdar r3 1079 mfdsisr r4 1080 std r3, VCPU_DAR(r9) 1081 stw r4, VCPU_DSISR(r9) 1082 1083 /* If this is a page table miss then see if it's theirs or ours */ 1084 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1085 beq kvmppc_hdsi 1086 std r3, VCPU_FAULT_DAR(r9) 1087 stw r4, VCPU_FAULT_DSISR(r9) 1088 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1089 beq kvmppc_hisi 1090 1091 /* See if this is a leftover HDEC interrupt */ 1092 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1093 bne 2f 1094 mfspr r3,SPRN_HDEC 1095 extsw r3, r3 1096 cmpdi r3,0 1097 mr r4,r9 1098 bge fast_guest_return 10992: 1100 /* See if this is an hcall we can handle in real mode */ 1101 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1102 beq hcall_try_real_mode 1103 1104 /* Hypervisor doorbell - exit only if host IPI flag set */ 1105 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL 1106 bne 3f 1107 lbz r0, HSTATE_HOST_IPI(r13) 1108 cmpwi r0, 0 1109 beq maybe_reenter_guest 1110 b guest_exit_cont 11113: 1112 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ 1113 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL 1114 bne 14f 1115 mfspr r3, SPRN_HFSCR 1116 std r3, VCPU_HFSCR(r9) 1117 b guest_exit_cont 111814: 1119 /* External interrupt ? */ 1120 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1121 beq kvmppc_guest_external 1122 /* See if it is a machine check */ 1123 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1124 beq machine_check_realmode 1125 /* Or a hypervisor maintenance interrupt */ 1126 cmpwi r12, BOOK3S_INTERRUPT_HMI 1127 beq hmi_realmode 1128 1129guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1130 1131#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1132 addi r3, r9, VCPU_TB_RMEXIT 1133 mr r4, r9 1134 bl kvmhv_accumulate_time 1135#endif 1136 1137 /* 1138 * Possibly flush the link stack here, before we do a blr in 1139 * kvmhv_switch_to_host. 1140 */ 11411: nop 1142 patch_site 1b patch__call_kvm_flush_link_stack 1143 1144 /* For hash guest, read the guest SLB and save it away */ 1145 li r5, 0 1146 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1147 mtctr r0 1148 li r6,0 1149 addi r7,r9,VCPU_SLB 11501: slbmfee r8,r6 1151 andis. r0,r8,SLB_ESID_V@h 1152 beq 2f 1153 add r8,r8,r6 /* put index in */ 1154 slbmfev r3,r6 1155 std r8,VCPU_SLB_E(r7) 1156 std r3,VCPU_SLB_V(r7) 1157 addi r7,r7,VCPU_SLB_SIZE 1158 addi r5,r5,1 11592: addi r6,r6,1 1160 bdnz 1b 1161 /* Finally clear out the SLB */ 1162 li r0,0 1163 slbmte r0,r0 1164 PPC_SLBIA(6) 1165 ptesync 1166 stw r5,VCPU_SLB_MAX(r9) 1167 1168 /* load host SLB entries */ 1169 ld r8,PACA_SLBSHADOWPTR(r13) 1170 1171 .rept SLB_NUM_BOLTED 1172 li r3, SLBSHADOW_SAVEAREA 1173 LDX_BE r5, r8, r3 1174 addi r3, r3, 8 1175 LDX_BE r6, r8, r3 1176 andis. 
r7,r5,SLB_ESID_V@h 1177 beq 1f 1178 slbmte r6,r5 11791: addi r8,r8,16 1180 .endr 1181 1182guest_bypass: 1183 stw r12, STACK_SLOT_TRAP(r1) 1184 1185 /* Save DEC */ 1186 /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1187 ld r3, HSTATE_KVM_VCORE(r13) 1188 mfspr r5,SPRN_DEC 1189 mftb r6 1190 extsw r5,r5 119116: add r5,r5,r6 1192 /* r5 is a guest timebase value here, convert to host TB */ 1193 ld r4,VCORE_TB_OFFSET_APPL(r3) 1194 subf r5,r4,r5 1195 std r5,VCPU_DEC_EXPIRES(r9) 1196 1197 /* Increment exit count, poke other threads to exit */ 1198 mr r3, r12 1199 bl kvmhv_commence_exit 1200 nop 1201 ld r9, HSTATE_KVM_VCPU(r13) 1202 1203 /* Stop others sending VCPU interrupts to this physical CPU */ 1204 li r0, -1 1205 stw r0, VCPU_CPU(r9) 1206 stw r0, VCPU_THREAD_CPU(r9) 1207 1208 /* Save guest CTRL register, set runlatch to 1 */ 1209 mfspr r6,SPRN_CTRLF 1210 stw r6,VCPU_CTRL(r9) 1211 andi. r0,r6,1 1212 bne 4f 1213 ori r6,r6,1 1214 mtspr SPRN_CTRLT,r6 12154: 1216 /* 1217 * Save the guest PURR/SPURR 1218 */ 1219 mfspr r5,SPRN_PURR 1220 mfspr r6,SPRN_SPURR 1221 ld r7,VCPU_PURR(r9) 1222 ld r8,VCPU_SPURR(r9) 1223 std r5,VCPU_PURR(r9) 1224 std r6,VCPU_SPURR(r9) 1225 subf r5,r7,r5 1226 subf r6,r8,r6 1227 1228 /* 1229 * Restore host PURR/SPURR and add guest times 1230 * so that the time in the guest gets accounted. 1231 */ 1232 ld r3,HSTATE_PURR(r13) 1233 ld r4,HSTATE_SPURR(r13) 1234 add r3,r3,r5 1235 add r4,r4,r6 1236 mtspr SPRN_PURR,r3 1237 mtspr SPRN_SPURR,r4 1238 1239BEGIN_FTR_SECTION 1240 b 8f 1241END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1242 /* Save POWER8-specific registers */ 1243 mfspr r5, SPRN_IAMR 1244 mfspr r6, SPRN_PSPB 1245 mfspr r7, SPRN_FSCR 1246 std r5, VCPU_IAMR(r9) 1247 stw r6, VCPU_PSPB(r9) 1248 std r7, VCPU_FSCR(r9) 1249 mfspr r5, SPRN_IC 1250 mfspr r7, SPRN_TAR 1251 std r5, VCPU_IC(r9) 1252 std r7, VCPU_TAR(r9) 1253 mfspr r8, SPRN_EBBHR 1254 std r8, VCPU_EBBHR(r9) 1255 mfspr r5, SPRN_EBBRR 1256 mfspr r6, SPRN_BESCR 1257 mfspr r7, SPRN_PID 1258 mfspr r8, SPRN_WORT 1259 std r5, VCPU_EBBRR(r9) 1260 std r6, VCPU_BESCR(r9) 1261 stw r7, VCPU_GUEST_PID(r9) 1262 std r8, VCPU_WORT(r9) 1263 mfspr r5, SPRN_TCSCR 1264 mfspr r6, SPRN_ACOP 1265 mfspr r7, SPRN_CSIGR 1266 mfspr r8, SPRN_TACR 1267 std r5, VCPU_TCSCR(r9) 1268 std r6, VCPU_ACOP(r9) 1269 std r7, VCPU_CSIGR(r9) 1270 std r8, VCPU_TACR(r9) 1271BEGIN_FTR_SECTION 1272 ld r5, STACK_SLOT_FSCR(r1) 1273 mtspr SPRN_FSCR, r5 1274END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1275 /* 1276 * Restore various registers to 0, where non-zero values 1277 * set by the guest could disrupt the host. 
1278 */ 1279 li r0, 0 1280 mtspr SPRN_PSPB, r0 1281 mtspr SPRN_WORT, r0 1282 mtspr SPRN_TCSCR, r0 1283 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1284 li r0, 1 1285 sldi r0, r0, 31 1286 mtspr SPRN_MMCRS, r0 1287 1288 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ 1289 ld r8, STACK_SLOT_IAMR(r1) 1290 mtspr SPRN_IAMR, r8 1291 12928: /* Power7 jumps back in here */ 1293 mfspr r5,SPRN_AMR 1294 mfspr r6,SPRN_UAMOR 1295 std r5,VCPU_AMR(r9) 1296 std r6,VCPU_UAMOR(r9) 1297 ld r5,STACK_SLOT_AMR(r1) 1298 ld r6,STACK_SLOT_UAMOR(r1) 1299 mtspr SPRN_AMR, r5 1300 mtspr SPRN_UAMOR, r6 1301 1302 /* Switch DSCR back to host value */ 1303 mfspr r8, SPRN_DSCR 1304 ld r7, HSTATE_DSCR(r13) 1305 std r8, VCPU_DSCR(r9) 1306 mtspr SPRN_DSCR, r7 1307 1308 /* Save non-volatile GPRs */ 1309 std r14, VCPU_GPR(R14)(r9) 1310 std r15, VCPU_GPR(R15)(r9) 1311 std r16, VCPU_GPR(R16)(r9) 1312 std r17, VCPU_GPR(R17)(r9) 1313 std r18, VCPU_GPR(R18)(r9) 1314 std r19, VCPU_GPR(R19)(r9) 1315 std r20, VCPU_GPR(R20)(r9) 1316 std r21, VCPU_GPR(R21)(r9) 1317 std r22, VCPU_GPR(R22)(r9) 1318 std r23, VCPU_GPR(R23)(r9) 1319 std r24, VCPU_GPR(R24)(r9) 1320 std r25, VCPU_GPR(R25)(r9) 1321 std r26, VCPU_GPR(R26)(r9) 1322 std r27, VCPU_GPR(R27)(r9) 1323 std r28, VCPU_GPR(R28)(r9) 1324 std r29, VCPU_GPR(R29)(r9) 1325 std r30, VCPU_GPR(R30)(r9) 1326 std r31, VCPU_GPR(R31)(r9) 1327 1328 /* Save SPRGs */ 1329 mfspr r3, SPRN_SPRG0 1330 mfspr r4, SPRN_SPRG1 1331 mfspr r5, SPRN_SPRG2 1332 mfspr r6, SPRN_SPRG3 1333 std r3, VCPU_SPRG0(r9) 1334 std r4, VCPU_SPRG1(r9) 1335 std r5, VCPU_SPRG2(r9) 1336 std r6, VCPU_SPRG3(r9) 1337 1338 /* save FP state */ 1339 mr r3, r9 1340 bl kvmppc_save_fp 1341 1342#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1343BEGIN_FTR_SECTION 1344 b 91f 1345END_FTR_SECTION_IFCLR(CPU_FTR_TM) 1346 /* 1347 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 1348 */ 1349 mr r3, r9 1350 ld r4, VCPU_MSR(r3) 1351 li r5, 0 /* don't preserve non-vol regs */ 1352 bl kvmppc_save_tm_hv 1353 nop 1354 ld r9, HSTATE_KVM_VCPU(r13) 135591: 1356#endif 1357 1358 /* Increment yield count if they have a VPA */ 1359 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1360 cmpdi r8, 0 1361 beq 25f 1362 li r4, LPPACA_YIELDCOUNT 1363 LWZX_BE r3, r8, r4 1364 addi r3, r3, 1 1365 STWX_BE r3, r8, r4 1366 li r3, 1 1367 stb r3, VCPU_VPA_DIRTY(r9) 136825: 1369 /* Save PMU registers if requested */ 1370 /* r8 and cr0.eq are live here */ 1371 mr r3, r9 1372 li r4, 1 1373 beq 21f /* if no VPA, save PMU stuff anyway */ 1374 lbz r4, LPPACA_PMCINUSE(r8) 137521: bl kvmhv_save_guest_pmu 1376 ld r9, HSTATE_KVM_VCPU(r13) 1377 1378 /* Restore host values of some registers */ 1379BEGIN_FTR_SECTION 1380 ld r5, STACK_SLOT_CIABR(r1) 1381 ld r6, STACK_SLOT_DAWR0(r1) 1382 ld r7, STACK_SLOT_DAWRX0(r1) 1383 mtspr SPRN_CIABR, r5 1384 /* 1385 * If the DAWR doesn't work, it's ok to write these here as 1386 * this value should always be zero 1387 */ 1388 mtspr SPRN_DAWR0, r6 1389 mtspr SPRN_DAWRX0, r7 1390END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1391 1392 /* 1393 * POWER7/POWER8 guest -> host partition switch code. 1394 * We don't have to lock against tlbies but we do 1395 * have to coordinate the hardware threads. 1396 * Here STACK_SLOT_TRAP(r1) contains the trap number. 
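	 * VCORE_ENTRY_EXIT holds two bitmaps: the low byte has a bit for
	 * each thread that has entered the guest and the next byte a bit
	 * for each thread that has started to exit; the primary spins
	 * below until the two maps are equal, i.e. every thread that
	 * entered has also exited.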
1397 */ 1398kvmhv_switch_to_host: 1399 /* Secondary threads wait for primary to do partition switch */ 1400 ld r5,HSTATE_KVM_VCORE(r13) 1401 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1402 lbz r3,HSTATE_PTID(r13) 1403 cmpwi r3,0 1404 beq 15f 1405 HMT_LOW 140613: lbz r3,VCORE_IN_GUEST(r5) 1407 cmpwi r3,0 1408 bne 13b 1409 HMT_MEDIUM 1410 b 16f 1411 1412 /* Primary thread waits for all the secondaries to exit guest */ 141315: lwz r3,VCORE_ENTRY_EXIT(r5) 1414 rlwinm r0,r3,32-8,0xff 1415 clrldi r3,r3,56 1416 cmpw r3,r0 1417 bne 15b 1418 isync 1419 1420 /* Did we actually switch to the guest at all? */ 1421 lbz r6, VCORE_IN_GUEST(r5) 1422 cmpwi r6, 0 1423 beq 19f 1424 1425 /* Primary thread switches back to host partition */ 1426 lwz r7,KVM_HOST_LPID(r4) 1427 ld r6,KVM_HOST_SDR1(r4) 1428 li r8,LPID_RSVD /* switch to reserved LPID */ 1429 mtspr SPRN_LPID,r8 1430 ptesync 1431 mtspr SPRN_SDR1,r6 /* switch to host page table */ 1432 mtspr SPRN_LPID,r7 1433 isync 1434 1435BEGIN_FTR_SECTION 1436 /* DPDES and VTB are shared between threads */ 1437 mfspr r7, SPRN_DPDES 1438 mfspr r8, SPRN_VTB 1439 std r7, VCORE_DPDES(r5) 1440 std r8, VCORE_VTB(r5) 1441 /* clear DPDES so we don't get guest doorbells in the host */ 1442 li r8, 0 1443 mtspr SPRN_DPDES, r8 1444END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1445 1446 /* Subtract timebase offset from timebase */ 1447 ld r8, VCORE_TB_OFFSET_APPL(r5) 1448 cmpdi r8,0 1449 beq 17f 1450 li r0, 0 1451 std r0, VCORE_TB_OFFSET_APPL(r5) 1452 mftb r6 /* current guest timebase */ 1453 subf r8,r8,r6 1454 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 1455 mftb r7 /* check if lower 24 bits overflowed */ 1456 clrldi r6,r6,40 1457 clrldi r7,r7,40 1458 cmpld r7,r6 1459 bge 17f 1460 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 1461 mtspr SPRN_TBU40,r8 1462 146317: 1464 /* 1465 * If this is an HMI, we called kvmppc_realmode_hmi_handler 1466 * above, which may or may not have already called 1467 * kvmppc_subcore_exit_guest. Fortunately, all that 1468 * kvmppc_subcore_exit_guest does is clear a flag, so calling 1469 * it again here is benign even if kvmppc_realmode_hmi_handler 1470 * has already called it. 1471 */ 1472 bl kvmppc_subcore_exit_guest 1473 nop 147430: ld r5,HSTATE_KVM_VCORE(r13) 1475 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1476 1477 /* Reset PCR */ 1478 ld r0, VCORE_PCR(r5) 1479 LOAD_REG_IMMEDIATE(r6, PCR_MASK) 1480 cmpld r0, r6 1481 beq 18f 1482 mtspr SPRN_PCR, r6 148318: 1484 /* Signal secondary CPUs to continue */ 1485 li r0, 0 1486 stb r0,VCORE_IN_GUEST(r5) 148719: lis r8,0x7fff /* MAX_INT@h */ 1488 mtspr SPRN_HDEC,r8 1489 149016: ld r8,KVM_HOST_LPCR(r4) 1491 mtspr SPRN_LPCR,r8 1492 isync 1493 1494#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1495 /* Finish timing, if we have a vcpu */ 1496 ld r4, HSTATE_KVM_VCPU(r13) 1497 cmpdi r4, 0 1498 li r3, 0 1499 beq 2f 1500 bl kvmhv_accumulate_time 15012: 1502#endif 1503 /* Unset guest mode */ 1504 li r0, KVM_GUEST_MODE_NONE 1505 stb r0, HSTATE_IN_GUEST(r13) 1506 1507 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ 1508 ld r0, SFS+PPC_LR_STKOFF(r1) 1509 addi r1, r1, SFS 1510 mtlr r0 1511 blr 1512 1513.balign 32 1514.global kvm_flush_link_stack 1515kvm_flush_link_stack: 1516 /* Save LR into r0 */ 1517 mflr r0 1518 1519 /* Flush the link stack. On Power8 it's up to 32 entries in size. */ 1520 .rept 32 1521 bl .+4 1522 .endr 1523 1524 /* And on Power9 it's up to 64. 
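	 * Each bl .+4 pushes a benign entry onto the branch predictor's
	 * link stack, displacing any entries trained while running the
	 * guest, so the host never consumes guest-controlled
	 * return-address predictions.  The flush is only wired in via the
	 * patch__call_kvm_flush_link_stack site above when the CPU needs it.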
*/ 1525BEGIN_FTR_SECTION 1526 .rept 32 1527 bl .+4 1528 .endr 1529END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1530 1531 /* Restore LR */ 1532 mtlr r0 1533 blr 1534 1535kvmppc_guest_external: 1536 /* External interrupt, first check for host_ipi. If this is 1537 * set, we know the host wants us out so let's do it now 1538 */ 1539 bl kvmppc_read_intr 1540 1541 /* 1542 * Restore the active volatile registers after returning from 1543 * a C function. 1544 */ 1545 ld r9, HSTATE_KVM_VCPU(r13) 1546 li r12, BOOK3S_INTERRUPT_EXTERNAL 1547 1548 /* 1549 * kvmppc_read_intr return codes: 1550 * 1551 * Exit to host (r3 > 0) 1552 * 1 An interrupt is pending that needs to be handled by the host 1553 * Exit guest and return to host by branching to guest_exit_cont 1554 * 1555 * 2 Passthrough that needs completion in the host 1556 * Exit guest and return to host by branching to guest_exit_cont 1557 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD 1558 * to indicate to the host to complete handling the interrupt 1559 * 1560 * Before returning to guest, we check if any CPU is heading out 1561 * to the host and if so, we head out also. If no CPUs are heading 1562 * check return values <= 0. 1563 * 1564 * Return to guest (r3 <= 0) 1565 * 0 No external interrupt is pending 1566 * -1 A guest wakeup IPI (which has now been cleared) 1567 * In either case, we return to guest to deliver any pending 1568 * guest interrupts. 1569 * 1570 * -2 A PCI passthrough external interrupt was handled 1571 * (interrupt was delivered directly to guest) 1572 * Return to guest to deliver any pending guest interrupts. 1573 */ 1574 1575 cmpdi r3, 1 1576 ble 1f 1577 1578 /* Return code = 2 */ 1579 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1580 stw r12, VCPU_TRAP(r9) 1581 b guest_exit_cont 1582 15831: /* Return code <= 1 */ 1584 cmpdi r3, 0 1585 bgt guest_exit_cont 1586 1587 /* Return code <= 0 */ 1588maybe_reenter_guest: 1589 ld r5, HSTATE_KVM_VCORE(r13) 1590 lwz r0, VCORE_ENTRY_EXIT(r5) 1591 cmpwi r0, 0x100 1592 mr r4, r9 1593 blt deliver_guest_interrupt 1594 b guest_exit_cont 1595 1596/* 1597 * Check whether an HDSI is an HPTE not found fault or something else. 1598 * If it is an HPTE not found fault that is due to the guest accessing 1599 * a page that they have mapped but which we have paged out, then 1600 * we continue on with the guest exit path. In all other cases, 1601 * reflect the HDSI to the guest as a DSI. 1602 */ 1603kvmppc_hdsi: 1604 mfspr r4, SPRN_HDAR 1605 mfspr r6, SPRN_HDSISR 1606 /* HPTE not found fault or protection fault? */ 1607 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 1608 beq 1f /* if not, send it to the guest */ 1609 andi. r0, r11, MSR_DR /* data relocation enabled? */ 1610 beq 3f 1611 clrrdi r0, r4, 28 1612 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1613 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT 1614 bne 7f /* if no SLB entry found */ 16154: std r4, VCPU_FAULT_DAR(r9) 1616 stw r6, VCPU_FAULT_DSISR(r9) 1617 1618 /* Search the hash table. 
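	 * kvmppc_hpte_hv_fault() returns 0 to retry the instruction, -1 to
	 * exit to the host, -2 when MMIO emulation is needed (so the
	 * instruction word must be fetched), or a DSISR value with which
	 * the fault is reflected to the guest as a DSI; the branches below
	 * follow that convention.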
*/ 1619 mr r3, r9 /* vcpu pointer */ 1620 li r7, 1 /* data fault */ 1621 bl kvmppc_hpte_hv_fault 1622 ld r9, HSTATE_KVM_VCPU(r13) 1623 ld r10, VCPU_PC(r9) 1624 ld r11, VCPU_MSR(r9) 1625 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1626 cmpdi r3, 0 /* retry the instruction */ 1627 beq 6f 1628 cmpdi r3, -1 /* handle in kernel mode */ 1629 beq guest_exit_cont 1630 cmpdi r3, -2 /* MMIO emulation; need instr word */ 1631 beq 2f 1632 1633 /* Synthesize a DSI (or DSegI) for the guest */ 1634 ld r4, VCPU_FAULT_DAR(r9) 1635 mr r6, r3 16361: li r0, BOOK3S_INTERRUPT_DATA_STORAGE 1637 mtspr SPRN_DSISR, r6 16387: mtspr SPRN_DAR, r4 1639 mtspr SPRN_SRR0, r10 1640 mtspr SPRN_SRR1, r11 1641 mr r10, r0 1642 bl kvmppc_msr_interrupt 1643fast_interrupt_c_return: 16446: ld r7, VCPU_CTR(r9) 1645 ld r8, VCPU_XER(r9) 1646 mtctr r7 1647 mtxer r8 1648 mr r4, r9 1649 b fast_guest_return 1650 16513: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ 1652 ld r5, KVM_VRMA_SLB_V(r5) 1653 b 4b 1654 1655 /* If this is for emulated MMIO, load the instruction word */ 16562: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ 1657 1658 /* Set guest mode to 'jump over instruction' so if lwz faults 1659 * we'll just continue at the next IP. */ 1660 li r0, KVM_GUEST_MODE_SKIP 1661 stb r0, HSTATE_IN_GUEST(r13) 1662 1663 /* Do the access with MSR:DR enabled */ 1664 mfmsr r3 1665 ori r4, r3, MSR_DR /* Enable paging for data */ 1666 mtmsrd r4 1667 lwz r8, 0(r10) 1668 mtmsrd r3 1669 1670 /* Store the result */ 1671 stw r8, VCPU_LAST_INST(r9) 1672 1673 /* Unset guest mode. */ 1674 li r0, KVM_GUEST_MODE_HOST_HV 1675 stb r0, HSTATE_IN_GUEST(r13) 1676 b guest_exit_cont 1677 1678/* 1679 * Similarly for an HISI, reflect it to the guest as an ISI unless 1680 * it is an HPTE not found fault for a page that we have paged out. 1681 */ 1682kvmppc_hisi: 1683 andis. r0, r11, SRR1_ISI_NOPT@h 1684 beq 1f 1685 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 1686 beq 3f 1687 clrrdi r0, r10, 28 1688 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1689 li r0, BOOK3S_INTERRUPT_INST_SEGMENT 1690 bne 7f /* if no SLB entry found */ 16914: 1692 /* Search the hash table. */ 1693 mr r3, r9 /* vcpu pointer */ 1694 mr r4, r10 1695 mr r6, r11 1696 li r7, 0 /* instruction fault */ 1697 bl kvmppc_hpte_hv_fault 1698 ld r9, HSTATE_KVM_VCPU(r13) 1699 ld r10, VCPU_PC(r9) 1700 ld r11, VCPU_MSR(r9) 1701 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1702 cmpdi r3, 0 /* retry the instruction */ 1703 beq fast_interrupt_c_return 1704 cmpdi r3, -1 /* handle in kernel mode */ 1705 beq guest_exit_cont 1706 1707 /* Synthesize an ISI (or ISegI) for the guest */ 1708 mr r11, r3 17091: li r0, BOOK3S_INTERRUPT_INST_STORAGE 17107: mtspr SPRN_SRR0, r10 1711 mtspr SPRN_SRR1, r11 1712 mr r10, r0 1713 bl kvmppc_msr_interrupt 1714 b fast_interrupt_c_return 1715 17163: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 1717 ld r5, KVM_VRMA_SLB_V(r6) 1718 b 4b 1719 1720/* 1721 * Try to handle an hcall in real mode. 1722 * Returns to the guest if we handle it, or continues on up to 1723 * the kernel if we can't (i.e. if we don't have a handler for 1724 * it, or if the handler returns H_TOO_HARD). 1725 * 1726 * r5 - r8 contain hcall args, 1727 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca 1728 */ 1729hcall_try_real_mode: 1730 ld r3,VCPU_GPR(R3)(r9) 1731 andi. 
r0,r11,MSR_PR 1732 /* sc 1 from userspace - reflect to guest syscall */ 1733 bne sc_1_fast_return 1734 clrrdi r3,r3,2 1735 cmpldi r3,hcall_real_table_end - hcall_real_table 1736 bge guest_exit_cont 1737 /* See if this hcall is enabled for in-kernel handling */ 1738 ld r4, VCPU_KVM(r9) 1739 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ 1740 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ 1741 add r4, r4, r0 1742 ld r0, KVM_ENABLED_HCALLS(r4) 1743 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ 1744 srd r0, r0, r4 1745 andi. r0, r0, 1 1746 beq guest_exit_cont 1747 /* Get pointer to handler, if any, and call it */ 1748 LOAD_REG_ADDR(r4, hcall_real_table) 1749 lwax r3,r3,r4 1750 cmpwi r3,0 1751 beq guest_exit_cont 1752 add r12,r3,r4 1753 mtctr r12 1754 mr r3,r9 /* get vcpu pointer */ 1755 ld r4,VCPU_GPR(R4)(r9) 1756 bctrl 1757 cmpdi r3,H_TOO_HARD 1758 beq hcall_real_fallback 1759 ld r4,HSTATE_KVM_VCPU(r13) 1760 std r3,VCPU_GPR(R3)(r4) 1761 ld r10,VCPU_PC(r4) 1762 ld r11,VCPU_MSR(r4) 1763 b fast_guest_return 1764 1765sc_1_fast_return: 1766 mtspr SPRN_SRR0,r10 1767 mtspr SPRN_SRR1,r11 1768 li r10, BOOK3S_INTERRUPT_SYSCALL 1769 bl kvmppc_msr_interrupt 1770 mr r4,r9 1771 b fast_guest_return 1772 1773 /* We've attempted a real mode hcall, but it's punted it back 1774 * to userspace. We need to restore some clobbered volatiles 1775 * before resuming the pass-it-to-qemu path */ 1776hcall_real_fallback: 1777 li r12,BOOK3S_INTERRUPT_SYSCALL 1778 ld r9, HSTATE_KVM_VCPU(r13) 1779 1780 b guest_exit_cont 1781 1782 .globl hcall_real_table 1783hcall_real_table: 1784 .long 0 /* 0 - unused */ 1785 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 1786 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 1787 .long DOTSYM(kvmppc_h_read) - hcall_real_table 1788 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 1789 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 1790 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 1791#ifdef CONFIG_SPAPR_TCE_IOMMU 1792 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 1793 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table 1794#else 1795 .long 0 /* 0x1c */ 1796 .long 0 /* 0x20 */ 1797#endif 1798 .long 0 /* 0x24 - H_SET_SPRG0 */ 1799 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 1800 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 1801 .long 0 /* 0x30 */ 1802 .long 0 /* 0x34 */ 1803 .long 0 /* 0x38 */ 1804 .long 0 /* 0x3c */ 1805 .long 0 /* 0x40 */ 1806 .long 0 /* 0x44 */ 1807 .long 0 /* 0x48 */ 1808 .long 0 /* 0x4c */ 1809 .long 0 /* 0x50 */ 1810 .long 0 /* 0x54 */ 1811 .long 0 /* 0x58 */ 1812 .long 0 /* 0x5c */ 1813 .long 0 /* 0x60 */ 1814#ifdef CONFIG_KVM_XICS 1815 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 1816 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 1817 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 1818 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table 1819 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 1820#else 1821 .long 0 /* 0x64 - H_EOI */ 1822 .long 0 /* 0x68 - H_CPPR */ 1823 .long 0 /* 0x6c - H_IPI */ 1824 .long 0 /* 0x70 - H_IPOLL */ 1825 .long 0 /* 0x74 - H_XIRR */ 1826#endif 1827 .long 0 /* 0x78 */ 1828 .long 0 /* 0x7c */ 1829 .long 0 /* 0x80 */ 1830 .long 0 /* 0x84 */ 1831 .long 0 /* 0x88 */ 1832 .long 0 /* 0x8c */ 1833 .long 0 /* 0x90 */ 1834 .long 0 /* 0x94 */ 1835 .long 0 /* 0x98 */ 1836 .long 0 /* 0x9c */ 1837 .long 0 /* 0xa0 */ 1838 .long 0 /* 0xa4 */ 1839 .long 0 /* 0xa8 */ 1840 .long 0 /* 0xac */ 1841 .long 0 /* 0xb0 */ 1842 .long 0 /* 0xb4 */ 1843 .long 0 /* 0xb8 */ 1844 .long 0 /* 0xbc */ 1845 .long 0 /* 
0xc0 */ 1846 .long 0 /* 0xc4 */ 1847 .long 0 /* 0xc8 */ 1848 .long 0 /* 0xcc */ 1849 .long 0 /* 0xd0 */ 1850 .long 0 /* 0xd4 */ 1851 .long 0 /* 0xd8 */ 1852 .long 0 /* 0xdc */ 1853 .long DOTSYM(kvmppc_h_cede) - hcall_real_table 1854 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 1855 .long 0 /* 0xe8 */ 1856 .long 0 /* 0xec */ 1857 .long 0 /* 0xf0 */ 1858 .long 0 /* 0xf4 */ 1859 .long 0 /* 0xf8 */ 1860 .long 0 /* 0xfc */ 1861 .long 0 /* 0x100 */ 1862 .long 0 /* 0x104 */ 1863 .long 0 /* 0x108 */ 1864 .long 0 /* 0x10c */ 1865 .long 0 /* 0x110 */ 1866 .long 0 /* 0x114 */ 1867 .long 0 /* 0x118 */ 1868 .long 0 /* 0x11c */ 1869 .long 0 /* 0x120 */ 1870 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 1871 .long 0 /* 0x128 */ 1872 .long 0 /* 0x12c */ 1873 .long 0 /* 0x130 */ 1874 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 1875#ifdef CONFIG_SPAPR_TCE_IOMMU 1876 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table 1877 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table 1878#else 1879 .long 0 /* 0x138 */ 1880 .long 0 /* 0x13c */ 1881#endif 1882 .long 0 /* 0x140 */ 1883 .long 0 /* 0x144 */ 1884 .long 0 /* 0x148 */ 1885 .long 0 /* 0x14c */ 1886 .long 0 /* 0x150 */ 1887 .long 0 /* 0x154 */ 1888 .long 0 /* 0x158 */ 1889 .long 0 /* 0x15c */ 1890 .long 0 /* 0x160 */ 1891 .long 0 /* 0x164 */ 1892 .long 0 /* 0x168 */ 1893 .long 0 /* 0x16c */ 1894 .long 0 /* 0x170 */ 1895 .long 0 /* 0x174 */ 1896 .long 0 /* 0x178 */ 1897 .long 0 /* 0x17c */ 1898 .long 0 /* 0x180 */ 1899 .long 0 /* 0x184 */ 1900 .long 0 /* 0x188 */ 1901 .long 0 /* 0x18c */ 1902 .long 0 /* 0x190 */ 1903 .long 0 /* 0x194 */ 1904 .long 0 /* 0x198 */ 1905 .long 0 /* 0x19c */ 1906 .long 0 /* 0x1a0 */ 1907 .long 0 /* 0x1a4 */ 1908 .long 0 /* 0x1a8 */ 1909 .long 0 /* 0x1ac */ 1910 .long 0 /* 0x1b0 */ 1911 .long 0 /* 0x1b4 */ 1912 .long 0 /* 0x1b8 */ 1913 .long 0 /* 0x1bc */ 1914 .long 0 /* 0x1c0 */ 1915 .long 0 /* 0x1c4 */ 1916 .long 0 /* 0x1c8 */ 1917 .long 0 /* 0x1cc */ 1918 .long 0 /* 0x1d0 */ 1919 .long 0 /* 0x1d4 */ 1920 .long 0 /* 0x1d8 */ 1921 .long 0 /* 0x1dc */ 1922 .long 0 /* 0x1e0 */ 1923 .long 0 /* 0x1e4 */ 1924 .long 0 /* 0x1e8 */ 1925 .long 0 /* 0x1ec */ 1926 .long 0 /* 0x1f0 */ 1927 .long 0 /* 0x1f4 */ 1928 .long 0 /* 0x1f8 */ 1929 .long 0 /* 0x1fc */ 1930 .long 0 /* 0x200 */ 1931 .long 0 /* 0x204 */ 1932 .long 0 /* 0x208 */ 1933 .long 0 /* 0x20c */ 1934 .long 0 /* 0x210 */ 1935 .long 0 /* 0x214 */ 1936 .long 0 /* 0x218 */ 1937 .long 0 /* 0x21c */ 1938 .long 0 /* 0x220 */ 1939 .long 0 /* 0x224 */ 1940 .long 0 /* 0x228 */ 1941 .long 0 /* 0x22c */ 1942 .long 0 /* 0x230 */ 1943 .long 0 /* 0x234 */ 1944 .long 0 /* 0x238 */ 1945 .long 0 /* 0x23c */ 1946 .long 0 /* 0x240 */ 1947 .long 0 /* 0x244 */ 1948 .long 0 /* 0x248 */ 1949 .long 0 /* 0x24c */ 1950 .long 0 /* 0x250 */ 1951 .long 0 /* 0x254 */ 1952 .long 0 /* 0x258 */ 1953 .long 0 /* 0x25c */ 1954 .long 0 /* 0x260 */ 1955 .long 0 /* 0x264 */ 1956 .long 0 /* 0x268 */ 1957 .long 0 /* 0x26c */ 1958 .long 0 /* 0x270 */ 1959 .long 0 /* 0x274 */ 1960 .long 0 /* 0x278 */ 1961 .long 0 /* 0x27c */ 1962 .long 0 /* 0x280 */ 1963 .long 0 /* 0x284 */ 1964 .long 0 /* 0x288 */ 1965 .long 0 /* 0x28c */ 1966 .long 0 /* 0x290 */ 1967 .long 0 /* 0x294 */ 1968 .long 0 /* 0x298 */ 1969 .long 0 /* 0x29c */ 1970 .long 0 /* 0x2a0 */ 1971 .long 0 /* 0x2a4 */ 1972 .long 0 /* 0x2a8 */ 1973 .long 0 /* 0x2ac */ 1974 .long 0 /* 0x2b0 */ 1975 .long 0 /* 0x2b4 */ 1976 .long 0 /* 0x2b8 */ 1977 .long 0 /* 0x2bc */ 1978 .long 0 /* 0x2c0 */ 1979 .long 0 /* 0x2c4 */ 1980 .long 0 /* 0x2c8 */ 
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_rm_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
	LOAD_REG_ADDR(r11, dawr_force_enable)
	lbz	r11, 0(r11)
	cmpdi	r11, 0
	bne	3f
	li	r3, H_HARDWARE
	blr
3:
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR0(r3)
	std	r5, VCPU_DAWRX0(r3)
	/*
	 * If we came in through the real mode hcall handler then it is
	 * necessary to write the registers, since the return path won't.
	 * Otherwise it is sufficient to store them in the vcpu struct as
	 * they will be loaded next time the vcpu is run.
	 */
	mfmsr	r6
	andi.	r6, r6, MSR_DR		/* in real mode? */
	bne	4f
	mtspr	SPRN_DAWR0, r4
	mtspr	SPRN_DAWRX0, r5
4:	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
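 * So below we just save the non-volatile GPRs, the FP/VSX state, the
 * TM state (if enabled) and the guest decrementer expiry before
 * going to nap.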
2104 */ 2105 /* Save non-volatile GPRs */ 2106 std r14, VCPU_GPR(R14)(r3) 2107 std r15, VCPU_GPR(R15)(r3) 2108 std r16, VCPU_GPR(R16)(r3) 2109 std r17, VCPU_GPR(R17)(r3) 2110 std r18, VCPU_GPR(R18)(r3) 2111 std r19, VCPU_GPR(R19)(r3) 2112 std r20, VCPU_GPR(R20)(r3) 2113 std r21, VCPU_GPR(R21)(r3) 2114 std r22, VCPU_GPR(R22)(r3) 2115 std r23, VCPU_GPR(R23)(r3) 2116 std r24, VCPU_GPR(R24)(r3) 2117 std r25, VCPU_GPR(R25)(r3) 2118 std r26, VCPU_GPR(R26)(r3) 2119 std r27, VCPU_GPR(R27)(r3) 2120 std r28, VCPU_GPR(R28)(r3) 2121 std r29, VCPU_GPR(R29)(r3) 2122 std r30, VCPU_GPR(R30)(r3) 2123 std r31, VCPU_GPR(R31)(r3) 2124 2125 /* save FP state */ 2126 bl kvmppc_save_fp 2127 2128#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2129BEGIN_FTR_SECTION 2130 b 91f 2131END_FTR_SECTION_IFCLR(CPU_FTR_TM) 2132 /* 2133 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2134 */ 2135 ld r3, HSTATE_KVM_VCPU(r13) 2136 ld r4, VCPU_MSR(r3) 2137 li r5, 0 /* don't preserve non-vol regs */ 2138 bl kvmppc_save_tm_hv 2139 nop 214091: 2141#endif 2142 2143 /* 2144 * Set DEC to the smaller of DEC and HDEC, so that we wake 2145 * no later than the end of our timeslice (HDEC interrupts 2146 * don't wake us from nap). 2147 */ 2148 mfspr r3, SPRN_DEC 2149 mfspr r4, SPRN_HDEC 2150 mftb r5 2151 extsw r3, r3 2152 extsw r4, r4 2153 cmpd r3, r4 2154 ble 67f 2155 mtspr SPRN_DEC, r4 215667: 2157 /* save expiry time of guest decrementer */ 2158 add r3, r3, r5 2159 ld r4, HSTATE_KVM_VCPU(r13) 2160 ld r5, HSTATE_KVM_VCORE(r13) 2161 ld r6, VCORE_TB_OFFSET_APPL(r5) 2162 subf r3, r6, r3 /* convert to host TB value */ 2163 std r3, VCPU_DEC_EXPIRES(r4) 2164 2165#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2166 ld r4, HSTATE_KVM_VCPU(r13) 2167 addi r3, r4, VCPU_TB_CEDE 2168 bl kvmhv_accumulate_time 2169#endif 2170 2171 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ 2172 2173 /* Go back to host stack */ 2174 ld r1, HSTATE_HOST_R1(r13) 2175 2176 /* 2177 * Take a nap until a decrementer or external or doorbell interrupt 2178 * occurs, with PECE1 and PECE0 set in LPCR. 2179 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. 2180 * Also clear the runlatch bit before napping.
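 * In rough C terms, the LPCR setup in kvm_do_nap below is (illustrative
 * sketch only):
 *	lpcr = mfspr(SPRN_LPCR);
 *	lpcr |= LPCR_PECE0 | LPCR_PECE1;	// wake on external/decrementer
 *	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 *		lpcr |= LPCR_PECEDH | pecedp;	// pecedp: LPCR_PECEDP or 0, held in r3
 *	mtspr(SPRN_LPCR, lpcr);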
2181 */ 2182kvm_do_nap: 2183 mfspr r0, SPRN_CTRLF 2184 clrrdi r0, r0, 1 2185 mtspr SPRN_CTRLT, r0 2186 2187 li r0,1 2188 stb r0,HSTATE_HWTHREAD_REQ(r13) 2189 mfspr r5,SPRN_LPCR 2190 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 2191BEGIN_FTR_SECTION 2192 ori r5, r5, LPCR_PECEDH 2193 rlwimi r5, r3, 0, LPCR_PECEDP 2194END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2195 2196kvm_nap_sequence: /* desired LPCR value in r5 */ 2197 li r3, PNV_THREAD_NAP 2198 mtspr SPRN_LPCR,r5 2199 isync 2200 2201 bl isa206_idle_insn_mayloss 2202 2203 mfspr r0, SPRN_CTRLF 2204 ori r0, r0, 1 2205 mtspr SPRN_CTRLT, r0 2206 2207 mtspr SPRN_SRR1, r3 2208 2209 li r0, 0 2210 stb r0, PACA_FTRACE_ENABLED(r13) 2211 2212 li r0, KVM_HWTHREAD_IN_KVM 2213 stb r0, HSTATE_HWTHREAD_STATE(r13) 2214 2215 lbz r0, HSTATE_NAPPING(r13) 2216 cmpwi r0, NAPPING_CEDE 2217 beq kvm_end_cede 2218 cmpwi r0, NAPPING_NOVCPU 2219 beq kvm_novcpu_wakeup 2220 cmpwi r0, NAPPING_UNSPLIT 2221 beq kvm_unsplit_wakeup 2222 twi 31,0,0 /* Nap state must not be zero */ 2223 222433: mr r4, r3 2225 li r3, 0 2226 li r12, 0 2227 b 34f 2228 2229kvm_end_cede: 2230 /* Woken by external or decrementer interrupt */ 2231 2232 /* get vcpu pointer */ 2233 ld r4, HSTATE_KVM_VCPU(r13) 2234 2235#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2236 addi r3, r4, VCPU_TB_RMINTR 2237 bl kvmhv_accumulate_time 2238#endif 2239 2240#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2241BEGIN_FTR_SECTION 2242 b 91f 2243END_FTR_SECTION_IFCLR(CPU_FTR_TM) 2244 /* 2245 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 2246 */ 2247 mr r3, r4 2248 ld r4, VCPU_MSR(r3) 2249 li r5, 0 /* don't preserve non-vol regs */ 2250 bl kvmppc_restore_tm_hv 2251 nop 2252 ld r4, HSTATE_KVM_VCPU(r13) 225391: 2254#endif 2255 2256 /* load up FP state */ 2257 bl kvmppc_load_fp 2258 2259 /* Restore guest decrementer */ 2260 ld r3, VCPU_DEC_EXPIRES(r4) 2261 ld r5, HSTATE_KVM_VCORE(r13) 2262 ld r6, VCORE_TB_OFFSET_APPL(r5) 2263 add r3, r3, r6 /* convert host TB to guest TB value */ 2264 mftb r7 2265 subf r3, r7, r3 2266 mtspr SPRN_DEC, r3 2267 2268 /* Load NV GPRS */ 2269 ld r14, VCPU_GPR(R14)(r4) 2270 ld r15, VCPU_GPR(R15)(r4) 2271 ld r16, VCPU_GPR(R16)(r4) 2272 ld r17, VCPU_GPR(R17)(r4) 2273 ld r18, VCPU_GPR(R18)(r4) 2274 ld r19, VCPU_GPR(R19)(r4) 2275 ld r20, VCPU_GPR(R20)(r4) 2276 ld r21, VCPU_GPR(R21)(r4) 2277 ld r22, VCPU_GPR(R22)(r4) 2278 ld r23, VCPU_GPR(R23)(r4) 2279 ld r24, VCPU_GPR(R24)(r4) 2280 ld r25, VCPU_GPR(R25)(r4) 2281 ld r26, VCPU_GPR(R26)(r4) 2282 ld r27, VCPU_GPR(R27)(r4) 2283 ld r28, VCPU_GPR(R28)(r4) 2284 ld r29, VCPU_GPR(R29)(r4) 2285 ld r30, VCPU_GPR(R30)(r4) 2286 ld r31, VCPU_GPR(R31)(r4) 2287 2288 /* Check the wake reason in SRR1 to see why we got here */ 2289 bl kvmppc_check_wake_reason 2290 2291 /* 2292 * Restore volatile registers since we could have called a 2293 * C routine in kvmppc_check_wake_reason 2294 * r4 = VCPU 2295 * r3 tells us whether we need to return to host or not 2296 * WARNING: it gets checked further down: 2297 * should not modify r3 until this check is done. 2298 */ 2299 ld r4, HSTATE_KVM_VCPU(r13) 2300 2301 /* clear our bit in vcore->napping_threads */ 230234: ld r5,HSTATE_KVM_VCORE(r13) 2303 lbz r7,HSTATE_PTID(r13) 2304 li r0,1 2305 sld r0,r0,r7 2306 addi r6,r5,VCORE_NAPPING_THREADS 230732: lwarx r7,0,r6 2308 andc r7,r7,r0 2309 stwcx. 
r7,0,r6 2310 bne 32b 2311 li r0,0 2312 stb r0,HSTATE_NAPPING(r13) 2313 2314 /* See if the wake reason saved in r3 means we need to exit */ 2315 stw r12, VCPU_TRAP(r4) 2316 mr r9, r4 2317 cmpdi r3, 0 2318 bgt guest_exit_cont 2319 b maybe_reenter_guest 2320 2321 /* cede when already previously prodded case */ 2322kvm_cede_prodded: 2323 li r0,0 2324 stb r0,VCPU_PRODDED(r3) 2325 sync /* order testing prodded vs. clearing ceded */ 2326 stb r0,VCPU_CEDED(r3) 2327 li r3,H_SUCCESS 2328 blr 2329 2330 /* we've ceded but we want to give control to the host */ 2331kvm_cede_exit: 2332 ld r9, HSTATE_KVM_VCPU(r13) 2333 b guest_exit_cont 2334 2335 /* Try to do machine check recovery in real mode */ 2336machine_check_realmode: 2337 mr r3, r9 /* get vcpu pointer */ 2338 bl kvmppc_realmode_machine_check 2339 nop 2340 /* all machine checks go to virtual mode for further handling */ 2341 ld r9, HSTATE_KVM_VCPU(r13) 2342 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2343 b guest_exit_cont 2344 2345/* 2346 * Call C code to handle a HMI in real mode. 2347 * Only the primary thread does the call, secondary threads are handled 2348 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 2349 * r9 points to the vcpu on entry 2350 */ 2351hmi_realmode: 2352 lbz r0, HSTATE_PTID(r13) 2353 cmpwi r0, 0 2354 bne guest_exit_cont 2355 bl kvmppc_realmode_hmi_handler 2356 ld r9, HSTATE_KVM_VCPU(r13) 2357 li r12, BOOK3S_INTERRUPT_HMI 2358 b guest_exit_cont 2359 2360/* 2361 * Check the reason we woke from nap, and take appropriate action. 2362 * Returns (in r3): 2363 * 0 if nothing needs to be done 2364 * 1 if something happened that needs to be handled by the host 2365 * -1 if there was a guest wakeup (IPI or msgsnd) 2366 * -2 if we handled a PCI passthrough interrupt (returned by 2367 * kvmppc_read_intr only) 2368 * 2369 * Also sets r12 to the interrupt vector for any interrupt that needs 2370 * to be handled now by the host (0x500 for external interrupt), or zero. 2371 * Modifies all volatile registers (since it may call a C function). 2372 * This routine calls kvmppc_read_intr, a C function, if an external 2373 * interrupt is pending. 2374 */ 2375kvmppc_check_wake_reason: 2376 mfspr r6, SPRN_SRR1 2377BEGIN_FTR_SECTION 2378 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2379FTR_SECTION_ELSE 2380 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2381ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2382 cmpwi r6, 8 /* was it an external interrupt? */ 2383 beq 7f /* if so, see what it was */ 2384 li r3, 0 2385 li r12, 0 2386 cmpwi r6, 6 /* was it the decrementer? */ 2387 beq 0f 2388BEGIN_FTR_SECTION 2389 cmpwi r6, 5 /* privileged doorbell? */ 2390 beq 0f 2391 cmpwi r6, 3 /* hypervisor doorbell? */ 2392 beq 3f 2393END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2394 cmpwi r6, 0xa /* Hypervisor maintenance ? */ 2395 beq 4f 2396 li r3, 1 /* anything else, return 1 */ 23970: blr 2398 2399 /* hypervisor doorbell */ 24003: li r12, BOOK3S_INTERRUPT_H_DOORBELL 2401 2402 /* 2403 * Clear the doorbell as we will invoke the handler 2404 * explicitly in the guest exit path. 
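 * In rough pseudo-C the doorbell case below amounts to:
 *	msgclr(PPC_DBELL_SERVER);	// drop the pending doorbell
 *	return host_ipi_pending ? 1 : -1;
 * where "host_ipi_pending" stands for the HSTATE_HOST_IPI flag.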
2405 */ 2406 lis r6, (PPC_DBELL_SERVER << (63-36))@h 2407 PPC_MSGCLR(6) 2408 /* see if it's a host IPI */ 2409 li r3, 1 2410 lbz r0, HSTATE_HOST_IPI(r13) 2411 cmpwi r0, 0 2412 bnelr 2413 /* if not, return -1 */ 2414 li r3, -1 2415 blr 2416 2417 /* Woken up due to Hypervisor maintenance interrupt */ 24184: li r12, BOOK3S_INTERRUPT_HMI 2419 li r3, 1 2420 blr 2421 2422 /* external interrupt - create a stack frame so we can call C */ 24237: mflr r0 2424 std r0, PPC_LR_STKOFF(r1) 2425 stdu r1, -PPC_MIN_STKFRM(r1) 2426 bl kvmppc_read_intr 2427 nop 2428 li r12, BOOK3S_INTERRUPT_EXTERNAL 2429 cmpdi r3, 1 2430 ble 1f 2431 2432 /* 2433 * Return code of 2 means PCI passthrough interrupt, but 2434 * we need to return back to host to complete handling the 2435 * interrupt. Trap reason is expected in r12 by guest 2436 * exit code. 2437 */ 2438 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 24391: 2440 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 2441 addi r1, r1, PPC_MIN_STKFRM 2442 mtlr r0 2443 blr 2444 2445/* 2446 * Save away FP, VMX and VSX registers. 2447 * r3 = vcpu pointer 2448 * N.B. r30 and r31 are volatile across this function, 2449 * thus it is not callable from C. 2450 */ 2451kvmppc_save_fp: 2452 mflr r30 2453 mr r31,r3 2454 mfmsr r5 2455 ori r8,r5,MSR_FP 2456#ifdef CONFIG_ALTIVEC 2457BEGIN_FTR_SECTION 2458 oris r8,r8,MSR_VEC@h 2459END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2460#endif 2461#ifdef CONFIG_VSX 2462BEGIN_FTR_SECTION 2463 oris r8,r8,MSR_VSX@h 2464END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2465#endif 2466 mtmsrd r8 2467 addi r3,r3,VCPU_FPRS 2468 bl store_fp_state 2469#ifdef CONFIG_ALTIVEC 2470BEGIN_FTR_SECTION 2471 addi r3,r31,VCPU_VRS 2472 bl store_vr_state 2473END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2474#endif 2475 mfspr r6,SPRN_VRSAVE 2476 stw r6,VCPU_VRSAVE(r31) 2477 mtlr r30 2478 blr 2479 2480/* 2481 * Load up FP, VMX and VSX registers 2482 * r4 = vcpu pointer 2483 * N.B. r30 and r31 are volatile across this function, 2484 * thus it is not callable from C. 2485 */ 2486kvmppc_load_fp: 2487 mflr r30 2488 mr r31,r4 2489 mfmsr r9 2490 ori r8,r9,MSR_FP 2491#ifdef CONFIG_ALTIVEC 2492BEGIN_FTR_SECTION 2493 oris r8,r8,MSR_VEC@h 2494END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2495#endif 2496#ifdef CONFIG_VSX 2497BEGIN_FTR_SECTION 2498 oris r8,r8,MSR_VSX@h 2499END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2500#endif 2501 mtmsrd r8 2502 addi r3,r4,VCPU_FPRS 2503 bl load_fp_state 2504#ifdef CONFIG_ALTIVEC 2505BEGIN_FTR_SECTION 2506 addi r3,r31,VCPU_VRS 2507 bl load_vr_state 2508END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2509#endif 2510 lwz r7,VCPU_VRSAVE(r31) 2511 mtspr SPRN_VRSAVE,r7 2512 mtlr r30 2513 mr r4,r31 2514 blr 2515 2516#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2517/* 2518 * Save transactional state and TM-related registers. 2519 * Called with r3 pointing to the vcpu struct and r4 containing 2520 * the guest MSR value. 2521 * r5 is non-zero iff non-volatile register state needs to be maintained. 2522 * If r5 == 0, this can modify all checkpointed registers, but 2523 * restores r1 and r2 before exit. 2524 */ 2525_GLOBAL_TOC(kvmppc_save_tm_hv) 2526EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) 2527 /* See if we need to handle fake suspend mode */ 2528BEGIN_FTR_SECTION 2529 b __kvmppc_save_tm 2530END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 2531 2532 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ 2533 cmpwi r0, 0 2534 beq __kvmppc_save_tm 2535 2536 /* The following code handles the fake_suspend = 1 case */ 2537 mflr r0 2538 std r0, PPC_LR_STKOFF(r1) 2539 stdu r1, -PPC_MIN_STKFRM(r1) 2540 2541 /* Turn on TM. 
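 * TM must be enabled in the MSR before the TM SPRs can be accessed or
 * treclaim executed; roughly: msr = mfmsr(); msr |= 1UL << MSR_TM_LG;
 * mtmsrd(msr).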
*/ 2542 mfmsr r8 2543 li r0, 1 2544 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 2545 mtmsrd r8 2546 2547 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ 2548 beq 4f 2549BEGIN_FTR_SECTION 2550 bl pnv_power9_force_smt4_catch 2551END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 2552 nop 2553 2554 /* We have to treclaim here because that's the only way to do S->N */ 2555 li r3, TM_CAUSE_KVM_RESCHED 2556 TRECLAIM(R3) 2557 2558 /* 2559 * We were in fake suspend, so we are not going to save the 2560 * register state as the guest checkpointed state (since 2561 * we already have it), therefore we can now use any volatile GPR. 2562 * In fact treclaim in fake suspend state doesn't modify 2563 * any registers. 2564 */ 2565 2566BEGIN_FTR_SECTION 2567 bl pnv_power9_force_smt4_release 2568END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 2569 nop 2570 25714: 2572 mfspr r3, SPRN_PSSCR 2573 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ 2574 li r0, PSSCR_FAKE_SUSPEND 2575 andc r3, r3, r0 2576 mtspr SPRN_PSSCR, r3 2577 2578 /* Don't save TEXASR, use value from last exit in real suspend state */ 2579 ld r9, HSTATE_KVM_VCPU(r13) 2580 mfspr r5, SPRN_TFHAR 2581 mfspr r6, SPRN_TFIAR 2582 std r5, VCPU_TFHAR(r9) 2583 std r6, VCPU_TFIAR(r9) 2584 2585 addi r1, r1, PPC_MIN_STKFRM 2586 ld r0, PPC_LR_STKOFF(r1) 2587 mtlr r0 2588 blr 2589 2590/* 2591 * Restore transactional state and TM-related registers. 2592 * Called with r3 pointing to the vcpu struct 2593 * and r4 containing the guest MSR value. 2594 * r5 is non-zero iff non-volatile register state needs to be maintained. 2595 * This potentially modifies all checkpointed registers. 2596 * It restores r1 and r2 from the PACA. 2597 */ 2598_GLOBAL_TOC(kvmppc_restore_tm_hv) 2599EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) 2600 /* 2601 * If we are doing TM emulation for the guest on a POWER9 DD2, 2602 * then we don't actually do a trechkpt -- we either set up 2603 * fake-suspend mode, or emulate a TM rollback. 2604 */ 2605BEGIN_FTR_SECTION 2606 b __kvmppc_restore_tm 2607END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 2608 mflr r0 2609 std r0, PPC_LR_STKOFF(r1) 2610 2611 li r0, 0 2612 stb r0, HSTATE_FAKE_SUSPEND(r13) 2613 2614 /* Turn on TM so we can restore TM SPRs */ 2615 mfmsr r5 2616 li r0, 1 2617 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG 2618 mtmsrd r5 2619 2620 /* 2621 * The user may change these outside of a transaction, so they must 2622 * always be context switched. 2623 */ 2624 ld r5, VCPU_TFHAR(r3) 2625 ld r6, VCPU_TFIAR(r3) 2626 ld r7, VCPU_TEXASR(r3) 2627 mtspr SPRN_TFHAR, r5 2628 mtspr SPRN_TFIAR, r6 2629 mtspr SPRN_TEXASR, r7 2630 2631 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 2632 beqlr /* TM not active in guest */ 2633 2634 /* Make sure the failure summary is set */ 2635 oris r7, r7, (TEXASR_FS)@h 2636 mtspr SPRN_TEXASR, r7 2637 2638 cmpwi r5, 1 /* check for suspended state */ 2639 bgt 10f 2640 stb r5, HSTATE_FAKE_SUSPEND(r13) 2641 b 9f /* and return */ 264210: stdu r1, -PPC_MIN_STKFRM(r1) 2643 /* guest is in transactional state, so simulate rollback */ 2644 bl kvmhv_emulate_tm_rollback 2645 nop 2646 addi r1, r1, PPC_MIN_STKFRM 26479: ld r0, PPC_LR_STKOFF(r1) 2648 mtlr r0 2649 blr 2650#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 2651 2652/* 2653 * We come here if we get any exception or interrupt while we are 2654 * executing host real mode code while in guest MMU context. 
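 * The interrupted state has been stashed as listed below; the handler
 * rebuilds an interrupt frame from it on the emergency stack so the
 * register state can at least be examined from another CPU.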
2655 * r12 is (CR << 32) | vector 2656 * r13 points to our PACA 2657 * r12 is saved in HSTATE_SCRATCH0(r13) 2658 * r9 is saved in HSTATE_SCRATCH2(r13) 2659 * r13 is saved in HSPRG1 2660 * cfar is saved in HSTATE_CFAR(r13) 2661 * ppr is saved in HSTATE_PPR(r13) 2662 */ 2663kvmppc_bad_host_intr: 2664 /* 2665 * Switch to the emergency stack, but start half-way down in 2666 * case we were already on it. 2667 */ 2668 mr r9, r1 2669 std r1, PACAR1(r13) 2670 ld r1, PACAEMERGSP(r13) 2671 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE 2672 std r9, 0(r1) 2673 std r0, GPR0(r1) 2674 std r9, GPR1(r1) 2675 std r2, GPR2(r1) 2676 SAVE_4GPRS(3, r1) 2677 SAVE_2GPRS(7, r1) 2678 srdi r0, r12, 32 2679 clrldi r12, r12, 32 2680 std r0, _CCR(r1) 2681 std r12, _TRAP(r1) 2682 andi. r0, r12, 2 2683 beq 1f 2684 mfspr r3, SPRN_HSRR0 2685 mfspr r4, SPRN_HSRR1 2686 mfspr r5, SPRN_HDAR 2687 mfspr r6, SPRN_HDSISR 2688 b 2f 26891: mfspr r3, SPRN_SRR0 2690 mfspr r4, SPRN_SRR1 2691 mfspr r5, SPRN_DAR 2692 mfspr r6, SPRN_DSISR 26932: std r3, _NIP(r1) 2694 std r4, _MSR(r1) 2695 std r5, _DAR(r1) 2696 std r6, _DSISR(r1) 2697 ld r9, HSTATE_SCRATCH2(r13) 2698 ld r12, HSTATE_SCRATCH0(r13) 2699 GET_SCRATCH0(r0) 2700 SAVE_4GPRS(9, r1) 2701 std r0, GPR13(r1) 2702 SAVE_NVGPRS(r1) 2703 ld r5, HSTATE_CFAR(r13) 2704 std r5, ORIG_GPR3(r1) 2705 mflr r3 2706 mfctr r4 2707 mfxer r5 2708 lbz r6, PACAIRQSOFTMASK(r13) 2709 std r3, _LINK(r1) 2710 std r4, _CTR(r1) 2711 std r5, _XER(r1) 2712 std r6, SOFTE(r1) 2713 ld r2, PACATOC(r13) 2714 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) 2715 std r3, STACK_FRAME_OVERHEAD-16(r1) 2716 2717 /* 2718 * XXX On POWER7 and POWER8, we just spin here since we don't 2719 * know what the other threads are doing (and we don't want to 2720 * coordinate with them) - but at least we now have register state 2721 * in memory that we might be able to look at from another CPU. 2722 */ 2723 b . 2724 2725/* 2726 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 2727 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 2728 * r11 has the guest MSR value (in/out) 2729 * r9 has a vcpu pointer (in) 2730 * r0 is used as a scratch register 2731 */ 2732kvmppc_msr_interrupt: 2733 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 2734 cmpwi r0, 2 /* Check if we are in transactional state.. */ 2735 ld r11, VCPU_INTR_MSR(r9) 2736 bne 1f 2737 /* ... if transactional, change to suspended */ 2738 li r0, 1 27391: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 2740 blr 2741 2742/* 2743 * Load up guest PMU state. R3 points to the vcpu struct. 2744 */ 2745_GLOBAL(kvmhv_load_guest_pmu) 2746EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) 2747 mr r4, r3 2748 mflr r0 2749 li r3, 1 2750 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 2751 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 2752 isync 2753BEGIN_FTR_SECTION 2754 ld r3, VCPU_MMCR(r4) 2755 andi. 
r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 2756 cmpwi r5, MMCR0_PMAO 2757 beql kvmppc_fix_pmao 2758END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 2759 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 2760 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 2761 lwz r6, VCPU_PMC + 8(r4) 2762 lwz r7, VCPU_PMC + 12(r4) 2763 lwz r8, VCPU_PMC + 16(r4) 2764 lwz r9, VCPU_PMC + 20(r4) 2765 mtspr SPRN_PMC1, r3 2766 mtspr SPRN_PMC2, r5 2767 mtspr SPRN_PMC3, r6 2768 mtspr SPRN_PMC4, r7 2769 mtspr SPRN_PMC5, r8 2770 mtspr SPRN_PMC6, r9 2771 ld r3, VCPU_MMCR(r4) 2772 ld r5, VCPU_MMCR + 8(r4) 2773 ld r6, VCPU_MMCRA(r4) 2774 ld r7, VCPU_SIAR(r4) 2775 ld r8, VCPU_SDAR(r4) 2776 mtspr SPRN_MMCR1, r5 2777 mtspr SPRN_MMCRA, r6 2778 mtspr SPRN_SIAR, r7 2779 mtspr SPRN_SDAR, r8 2780BEGIN_FTR_SECTION 2781 ld r5, VCPU_MMCR + 24(r4) 2782 ld r6, VCPU_SIER + 8(r4) 2783 ld r7, VCPU_SIER + 16(r4) 2784 mtspr SPRN_MMCR3, r5 2785 mtspr SPRN_SIER2, r6 2786 mtspr SPRN_SIER3, r7 2787END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 2788BEGIN_FTR_SECTION 2789 ld r5, VCPU_MMCR + 16(r4) 2790 ld r6, VCPU_SIER(r4) 2791 mtspr SPRN_MMCR2, r5 2792 mtspr SPRN_SIER, r6 2793BEGIN_FTR_SECTION_NESTED(96) 2794 lwz r7, VCPU_PMC + 24(r4) 2795 lwz r8, VCPU_PMC + 28(r4) 2796 ld r9, VCPU_MMCRS(r4) 2797 mtspr SPRN_SPMC1, r7 2798 mtspr SPRN_SPMC2, r8 2799 mtspr SPRN_MMCRS, r9 2800END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 2801END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2802 mtspr SPRN_MMCR0, r3 2803 isync 2804 mtlr r0 2805 blr 2806 2807/* 2808 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. 2809 */ 2810_GLOBAL(kvmhv_load_host_pmu) 2811EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) 2812 mflr r0 2813 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ 2814 cmpwi r4, 0 2815 beq 23f /* skip if not */ 2816BEGIN_FTR_SECTION 2817 ld r3, HSTATE_MMCR0(r13) 2818 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 2819 cmpwi r4, MMCR0_PMAO 2820 beql kvmppc_fix_pmao 2821END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 2822 lwz r3, HSTATE_PMC1(r13) 2823 lwz r4, HSTATE_PMC2(r13) 2824 lwz r5, HSTATE_PMC3(r13) 2825 lwz r6, HSTATE_PMC4(r13) 2826 lwz r8, HSTATE_PMC5(r13) 2827 lwz r9, HSTATE_PMC6(r13) 2828 mtspr SPRN_PMC1, r3 2829 mtspr SPRN_PMC2, r4 2830 mtspr SPRN_PMC3, r5 2831 mtspr SPRN_PMC4, r6 2832 mtspr SPRN_PMC5, r8 2833 mtspr SPRN_PMC6, r9 2834 ld r3, HSTATE_MMCR0(r13) 2835 ld r4, HSTATE_MMCR1(r13) 2836 ld r5, HSTATE_MMCRA(r13) 2837 ld r6, HSTATE_SIAR(r13) 2838 ld r7, HSTATE_SDAR(r13) 2839 mtspr SPRN_MMCR1, r4 2840 mtspr SPRN_MMCRA, r5 2841 mtspr SPRN_SIAR, r6 2842 mtspr SPRN_SDAR, r7 2843BEGIN_FTR_SECTION 2844 ld r8, HSTATE_MMCR2(r13) 2845 ld r9, HSTATE_SIER(r13) 2846 mtspr SPRN_MMCR2, r8 2847 mtspr SPRN_SIER, r9 2848END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2849BEGIN_FTR_SECTION 2850 ld r5, HSTATE_MMCR3(r13) 2851 ld r6, HSTATE_SIER2(r13) 2852 ld r7, HSTATE_SIER3(r13) 2853 mtspr SPRN_MMCR3, r5 2854 mtspr SPRN_SIER2, r6 2855 mtspr SPRN_SIER3, r7 2856END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 2857 mtspr SPRN_MMCR0, r3 2858 isync 2859 mtlr r0 286023: blr 2861 2862/* 2863 * Save guest PMU state into the vcpu struct. 
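 * The counters are frozen (MMCR0[FC], plus the MMCR2 freeze bits on
 * POWER8) before they are read, so the saved values form a consistent
 * snapshot.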
2864 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) 2865 */ 2866_GLOBAL(kvmhv_save_guest_pmu) 2867EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) 2868 mr r9, r3 2869 mr r8, r4 2870BEGIN_FTR_SECTION 2871 /* 2872 * POWER8 seems to have a hardware bug where setting 2873 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] 2874 * when some counters are already negative doesn't seem 2875 * to cause a performance monitor alert (and hence interrupt). 2876 * The effect of this is that when saving the PMU state, 2877 * if there is no PMU alert pending when we read MMCR0 2878 * before freezing the counters, but one becomes pending 2879 * before we read the counters, we lose it. 2880 * To work around this, we need a way to freeze the counters 2881 * before reading MMCR0. Normally, freezing the counters 2882 * is done by writing MMCR0 (to set MMCR0[FC]) which 2883 * unavoidably writes MMCR0[PMAO] as well. On POWER8, 2884 * we can also freeze the counters using MMCR2, by writing 2885 * 1s to all the counter freeze condition bits (there are 2886 * 9 bits each for 6 counters). 2887 */ 2888 li r3, -1 /* set all freeze bits */ 2889 clrrdi r3, r3, 10 2890 mfspr r10, SPRN_MMCR2 2891 mtspr SPRN_MMCR2, r3 2892 isync 2893END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2894 li r3, 1 2895 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 2896 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 2897 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 2898 mfspr r6, SPRN_MMCRA 2899 /* Clear MMCRA in order to disable SDAR updates */ 2900 li r7, 0 2901 mtspr SPRN_MMCRA, r7 2902 isync 2903 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ 2904 bne 21f 2905 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 2906 b 22f 290721: mfspr r5, SPRN_MMCR1 2908 mfspr r7, SPRN_SIAR 2909 mfspr r8, SPRN_SDAR 2910 std r4, VCPU_MMCR(r9) 2911 std r5, VCPU_MMCR + 8(r9) 2912 std r6, VCPU_MMCRA(r9) 2913BEGIN_FTR_SECTION 2914 std r10, VCPU_MMCR + 16(r9) 2915END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2916BEGIN_FTR_SECTION 2917 mfspr r5, SPRN_MMCR3 2918 mfspr r6, SPRN_SIER2 2919 mfspr r7, SPRN_SIER3 2920 std r5, VCPU_MMCR + 24(r9) 2921 std r6, VCPU_SIER + 8(r9) 2922 std r7, VCPU_SIER + 16(r9) 2923END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 2924 std r7, VCPU_SIAR(r9) 2925 std r8, VCPU_SDAR(r9) 2926 mfspr r3, SPRN_PMC1 2927 mfspr r4, SPRN_PMC2 2928 mfspr r5, SPRN_PMC3 2929 mfspr r6, SPRN_PMC4 2930 mfspr r7, SPRN_PMC5 2931 mfspr r8, SPRN_PMC6 2932 stw r3, VCPU_PMC(r9) 2933 stw r4, VCPU_PMC + 4(r9) 2934 stw r5, VCPU_PMC + 8(r9) 2935 stw r6, VCPU_PMC + 12(r9) 2936 stw r7, VCPU_PMC + 16(r9) 2937 stw r8, VCPU_PMC + 20(r9) 2938BEGIN_FTR_SECTION 2939 mfspr r5, SPRN_SIER 2940 std r5, VCPU_SIER(r9) 2941BEGIN_FTR_SECTION_NESTED(96) 2942 mfspr r6, SPRN_SPMC1 2943 mfspr r7, SPRN_SPMC2 2944 mfspr r8, SPRN_MMCRS 2945 stw r6, VCPU_PMC + 24(r9) 2946 stw r7, VCPU_PMC + 28(r9) 2947 std r8, VCPU_MMCRS(r9) 2948 lis r4, 0x8000 2949 mtspr SPRN_MMCRS, r4 2950END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 2951END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 295222: blr 2953 2954 /* 2955 * This works around a hardware bug on POWER8E processors, where 2956 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 2957 * performance monitor interrupt. Instead, when we need to have 2958 * an interrupt pending, we have to arrange for a counter to overflow.
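 * Below: the MMCR2 freeze bits are cleared, MMCR0 is set to
 * PMXE | FCECE | PMCjCE | C56RUN, and PMC6 is loaded with 0x7fffffff
 * so that its next increment overflows and raises the pending interrupt.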
2959 */ 2960kvmppc_fix_pmao: 2961 li r3, 0 2962 mtspr SPRN_MMCR2, r3 2963 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h 2964 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN 2965 mtspr SPRN_MMCR0, r3 2966 lis r3, 0x7fff 2967 ori r3, r3, 0xffff 2968 mtspr SPRN_PMC6, r3 2969 isync 2970 blr 2971 2972#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2973/* 2974 * Start timing an activity 2975 * r3 = pointer to time accumulation struct, r4 = vcpu 2976 */ 2977kvmhv_start_timing: 2978 ld r5, HSTATE_KVM_VCORE(r13) 2979 ld r6, VCORE_TB_OFFSET_APPL(r5) 2980 mftb r5 2981 subf r5, r6, r5 /* subtract current timebase offset */ 2982 std r3, VCPU_CUR_ACTIVITY(r4) 2983 std r5, VCPU_ACTIVITY_START(r4) 2984 blr 2985 2986/* 2987 * Accumulate time to one activity and start another. 2988 * r3 = pointer to new time accumulation struct, r4 = vcpu 2989 */ 2990kvmhv_accumulate_time: 2991 ld r5, HSTATE_KVM_VCORE(r13) 2992 ld r8, VCORE_TB_OFFSET_APPL(r5) 2993 ld r5, VCPU_CUR_ACTIVITY(r4) 2994 ld r6, VCPU_ACTIVITY_START(r4) 2995 std r3, VCPU_CUR_ACTIVITY(r4) 2996 mftb r7 2997 subf r7, r8, r7 /* subtract current timebase offset */ 2998 std r7, VCPU_ACTIVITY_START(r4) 2999 cmpdi r5, 0 3000 beqlr 3001 subf r3, r6, r7 3002 ld r8, TAS_SEQCOUNT(r5) 3003 cmpdi r8, 0 3004 addi r8, r8, 1 3005 std r8, TAS_SEQCOUNT(r5) 3006 lwsync 3007 ld r7, TAS_TOTAL(r5) 3008 add r7, r7, r3 3009 std r7, TAS_TOTAL(r5) 3010 ld r6, TAS_MIN(r5) 3011 ld r7, TAS_MAX(r5) 3012 beq 3f 3013 cmpd r3, r6 3014 bge 1f 30153: std r3, TAS_MIN(r5) 30161: cmpd r3, r7 3017 ble 2f 3018 std r3, TAS_MAX(r5) 30192: lwsync 3020 addi r8, r8, 1 3021 std r8, TAS_SEQCOUNT(r5) 3022 blr 3023#endif 3024
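/*
 * Illustrative C sketch of the seqcount protocol kvmhv_accumulate_time
 * uses when updating a time accumulation struct (field names assumed to
 * mirror the TAS_* offsets; sketch only, not compiled):
 *
 *	tas->seqcount++;		// odd: update in progress
 *	lwsync;
 *	tas->total += delta;
 *	if (first_update || delta < tas->min)
 *		tas->min = delta;
 *	if (delta > tas->max)
 *		tas->max = delta;
 *	lwsync;
 *	tas->seqcount++;		// even again: consistent to readers
 */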