/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
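	/*
	 * If a PMU alert is pending (PMAO set) but PMAO_SYNC is clear,
	 * kvmppc_fix_pmao is needed to regenerate the alert before the
	 * host MMCR0 is restored below (POWER8 PMAO bug workaround).
	 */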
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
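	/* atomically set the bit with a lwarx/stwcx. retry loop */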
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already done the
	 * partition switch and TB resync, so it is safe to call the
	 * opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop. The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
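	/*
	 * Set LPCR[PECE0] and clear LPCR[PECE1] so that external
	 * exceptions, but not the decrementer, can wake us from nap.
	 */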
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes unhandled even though the
	 * subcores have already exited the guest. The HMI then keeps
	 * waking the secondaries from nap in a loop, and they always go
	 * back to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it here before
	 * we nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/* Clear out SLB if hash */
	bne	cr7, 2f
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync
2:
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
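	/*
	 * The entry_exit map packs the entry map in the low byte and the
	 * exit map above it, so a value >= 0x100 means some thread has
	 * already started exiting and we are too late to enter.
	 */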
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
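	/* as for the host PMU reload above, recreate a lost PMU alert
	 * (PMAO set, PMAO_SYNC clear) before loading the guest MMCR0 */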
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	std	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, r0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	stdcix	r11,r9,r10
	eieio
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stw	r9, VCPU_XIVE_PUSHED(r4)
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
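	/* otherwise see whether the guest decrementer has expired */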
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to the guest, we check if any CPU is heading
	 * out to the host; if so, we head out also. If no CPUs are
	 * heading out, fall through to the return-to-guest cases below
	 * (r3 <= 0).
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */

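	/* dispatch on the kvmppc_read_intr return code described above */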
	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lwz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzx	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzcix	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stw	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1:
#endif /* CONFIG_KVM_XICS */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	r0, 0
	li	r5, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
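	/* walk the SLB; only valid entries (SLB_ESID_V) get saved */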
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
3:	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already resynced
	 * the timebase, so we don't need to subtract the guest timebase
	 * offset from the TB here; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * already been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
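	/* r3 now holds 0 (retry the instruction), -1 (handle in the
	 * host kernel), or SRR1 bits for an ISI to reflect to the guest */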
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

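/*
 * Table of offsets from hcall_real_table to the real-mode hcall
 * handlers, indexed by hcall number (which is a multiple of 4);
 * a zero entry means no real-mode handler for that hcall.
 */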
/* We've attempted a real mode hcall, but the handler has punted it
 * back to userspace.  We need to restore some clobbered volatiles
 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

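/*
 * Each table entry above is a 32-bit offset of the handler from
 * hcall_real_table (0 = no real-mode handler), indexed by hcall
 * number / 4; the lwax/add pair in hcall_try_real_mode turns an
 * entry back into an absolute address, roughly (a sketch):
 *
 *	s32 off = hcall_real_table[nr / 4];
 *	handler = off ? (char *)hcall_real_table + off : NULL;
 */
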
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

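/*
 * In rough C, the lwarx/stwcx. loop above is (a sketch; the low byte
 * of entry_exit_map is taken as the bitmap of threads that have
 * entered the guest, matching the 0x100 exit test used elsewhere):
 *
 *	do {
 *		old = vc->napping_threads;
 *		new = old | (1 << ptid);
 *		if (new == entered_map)
 *			goto kvm_cede_exit;	// we are the last one awake
 *	} while (cmpxchg(&vc->napping_threads, old, new) != old);
 */
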
/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

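/*
 * The store/ptesync/load/compare sequence just above appears to be
 * the recommended idiom for entering a power-saving state: the
 * ptesync and the dependent load force all earlier stores (including
 * the LPCR/PSSCR updates) to complete, and the compare-and-branch,
 * which can never be taken, stalls until the load has finished, so
 * nap/stop is not executed with stores still in flight.
 */
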
33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: r3 gets checked further down; do not modify it
	 * until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede processing when the vcpu has already been prodded */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

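/*
 * Wake-side summary of the cede path above, in rough pseudo-C
 * (a sketch, not actual kernel code):
 *
 *	restore TM, FP and the guest decrementer, reload NV GPRs;
 *	r3 = kvmppc_check_wake_reason();
 *	atomically clear our bit in vc->napping_threads;
 *	if (r3 > 0 || some thread is already exiting)
 *		goto guest_exit_cont;
 *	goto kvmppc_cede_reentry;
 */
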
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all the MCE errors
	 * (handled or unhandled) by exiting the guest with a KVM_EXIT_NMI
	 * exit reason.  This approach injects machine check errors into
	 * the guest address space, with additional information in the
	 * form of an RTAS event, enabling the guest kernel to handle
	 * such errors suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0) then deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0	if nothing needs to be done
 *	1	if something happened that needs to be handled by the host
 *	-1	if there was a guest wakeup (IPI or msgsnd)
 *	-2	if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

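/*
 * In rough C, the dispatch above and its tails below implement the
 * return-value contract documented above (a sketch):
 *
 *	switch (wake_reason) {
 *	case 8:			// external interrupt
 *		r3 = kvmppc_read_intr();
 *		r12 = EXTERNAL, or HV_RM_HARD when r3 == 2;
 *		return r3;
 *	case 6: case 5:		// decrementer, privileged doorbell
 *		return 0;
 *	case 3:			// hypervisor doorbell
 *		clear the doorbell; return host_ipi ? 1 : -1;
 *	case 0xa:		// hypervisor maintenance
 *		r12 = HMI; return 1;
 *	default:
 *		return 1;
 *	}
 */
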
2704 */ 2705 lis r6, (PPC_DBELL_SERVER << (63-36))@h 2706 PPC_MSGCLR(6) 2707 /* see if it's a host IPI */ 2708 li r3, 1 2709 lbz r0, HSTATE_HOST_IPI(r13) 2710 cmpwi r0, 0 2711 bnelr 2712 /* if not, return -1 */ 2713 li r3, -1 2714 blr 2715 2716 /* Woken up due to Hypervisor maintenance interrupt */ 27174: li r12, BOOK3S_INTERRUPT_HMI 2718 li r3, 1 2719 blr 2720 2721 /* external interrupt - create a stack frame so we can call C */ 27227: mflr r0 2723 std r0, PPC_LR_STKOFF(r1) 2724 stdu r1, -PPC_MIN_STKFRM(r1) 2725 bl kvmppc_read_intr 2726 nop 2727 li r12, BOOK3S_INTERRUPT_EXTERNAL 2728 cmpdi r3, 1 2729 ble 1f 2730 2731 /* 2732 * Return code of 2 means PCI passthrough interrupt, but 2733 * we need to return back to host to complete handling the 2734 * interrupt. Trap reason is expected in r12 by guest 2735 * exit code. 2736 */ 2737 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 27381: 2739 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 2740 addi r1, r1, PPC_MIN_STKFRM 2741 mtlr r0 2742 blr 2743 2744/* 2745 * Save away FP, VMX and VSX registers. 2746 * r3 = vcpu pointer 2747 * N.B. r30 and r31 are volatile across this function, 2748 * thus it is not callable from C. 2749 */ 2750kvmppc_save_fp: 2751 mflr r30 2752 mr r31,r3 2753 mfmsr r5 2754 ori r8,r5,MSR_FP 2755#ifdef CONFIG_ALTIVEC 2756BEGIN_FTR_SECTION 2757 oris r8,r8,MSR_VEC@h 2758END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2759#endif 2760#ifdef CONFIG_VSX 2761BEGIN_FTR_SECTION 2762 oris r8,r8,MSR_VSX@h 2763END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2764#endif 2765 mtmsrd r8 2766 addi r3,r3,VCPU_FPRS 2767 bl store_fp_state 2768#ifdef CONFIG_ALTIVEC 2769BEGIN_FTR_SECTION 2770 addi r3,r31,VCPU_VRS 2771 bl store_vr_state 2772END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2773#endif 2774 mfspr r6,SPRN_VRSAVE 2775 stw r6,VCPU_VRSAVE(r31) 2776 mtlr r30 2777 blr 2778 2779/* 2780 * Load up FP, VMX and VSX registers 2781 * r4 = vcpu pointer 2782 * N.B. r30 and r31 are volatile across this function, 2783 * thus it is not callable from C. 2784 */ 2785kvmppc_load_fp: 2786 mflr r30 2787 mr r31,r4 2788 mfmsr r9 2789 ori r8,r9,MSR_FP 2790#ifdef CONFIG_ALTIVEC 2791BEGIN_FTR_SECTION 2792 oris r8,r8,MSR_VEC@h 2793END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2794#endif 2795#ifdef CONFIG_VSX 2796BEGIN_FTR_SECTION 2797 oris r8,r8,MSR_VSX@h 2798END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2799#endif 2800 mtmsrd r8 2801 addi r3,r4,VCPU_FPRS 2802 bl load_fp_state 2803#ifdef CONFIG_ALTIVEC 2804BEGIN_FTR_SECTION 2805 addi r3,r31,VCPU_VRS 2806 bl load_vr_state 2807END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2808#endif 2809 lwz r7,VCPU_VRSAVE(r31) 2810 mtspr SPRN_VRSAVE,r7 2811 mtlr r30 2812 mr r4,r31 2813 blr 2814 2815#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2816/* 2817 * Save transactional state and TM-related registers. 2818 * Called with r9 pointing to the vcpu struct. 2819 * This can modify all checkpointed registers, but 2820 * restores r1, r2 and r9 (vcpu pointer) before exit. 2821 */ 2822kvmppc_save_tm: 2823 mflr r0 2824 std r0, PPC_LR_STKOFF(r1) 2825 2826 /* Turn on TM. */ 2827 mfmsr r8 2828 li r0, 1 2829 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 2830 mtmsrd r8 2831 2832 ld r5, VCPU_MSR(r9) 2833 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 2834 beq 1f /* TM not active in guest. */ 2835 2836 std r1, HSTATE_HOST_R1(r13) 2837 li r3, TM_CAUSE_KVM_RESCHED 2838 2839 /* Clear the MSR RI since r1, r13 are all going to be foobar. */ 2840 li r5, 0 2841 mtmsrd r5, 1 2842 2843 /* All GPRs are volatile at this point. 
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

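/*
 * kvmppc_save_tm above and kvmppc_restore_tm below are a pair:
 * treclaim pulls the guest's checkpointed state into the live
 * registers so it can be written out to the vcpu struct, and
 * trechkpt later rebuilds the checkpoint from the same area.
 * Since both instructions make all GPRs volatile, the code parks
 * r9/r13 in scratch slots, saves/reloads r1 and r2 via the PACA,
 * and runs with MSR_RI clear while the registers are in flux.
 */
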
2970 */ 2971 oris r7, r7, (TEXASR_FS)@h 2972 mtspr SPRN_TEXASR, r7 2973 2974 /* 2975 * We need to load up the checkpointed state for the guest. 2976 * We need to do this early as it will blow away any GPRs, VSRs and 2977 * some SPRs. 2978 */ 2979 2980 mr r31, r4 2981 addi r3, r31, VCPU_FPRS_TM 2982 bl load_fp_state 2983 addi r3, r31, VCPU_VRS_TM 2984 bl load_vr_state 2985 mr r4, r31 2986 lwz r7, VCPU_VRSAVE_TM(r4) 2987 mtspr SPRN_VRSAVE, r7 2988 2989 ld r5, VCPU_LR_TM(r4) 2990 lwz r6, VCPU_CR_TM(r4) 2991 ld r7, VCPU_CTR_TM(r4) 2992 ld r8, VCPU_AMR_TM(r4) 2993 ld r9, VCPU_TAR_TM(r4) 2994 ld r10, VCPU_XER_TM(r4) 2995 mtlr r5 2996 mtcr r6 2997 mtctr r7 2998 mtspr SPRN_AMR, r8 2999 mtspr SPRN_TAR, r9 3000 mtxer r10 3001 3002 /* 3003 * Load up PPR and DSCR values but don't put them in the actual SPRs 3004 * till the last moment to avoid running with userspace PPR and DSCR for 3005 * too long. 3006 */ 3007 ld r29, VCPU_DSCR_TM(r4) 3008 ld r30, VCPU_PPR_TM(r4) 3009 3010 std r2, PACATMSCRATCH(r13) /* Save TOC */ 3011 3012 /* Clear the MSR RI since r1, r13 are all going to be foobar. */ 3013 li r5, 0 3014 mtmsrd r5, 1 3015 3016 /* Load GPRs r0-r28 */ 3017 reg = 0 3018 .rept 29 3019 ld reg, VCPU_GPRS_TM(reg)(r31) 3020 reg = reg + 1 3021 .endr 3022 3023 mtspr SPRN_DSCR, r29 3024 mtspr SPRN_PPR, r30 3025 3026 /* Load final GPRs */ 3027 ld 29, VCPU_GPRS_TM(29)(r31) 3028 ld 30, VCPU_GPRS_TM(30)(r31) 3029 ld 31, VCPU_GPRS_TM(31)(r31) 3030 3031 /* TM checkpointed state is now setup. All GPRs are now volatile. */ 3032 TRECHKPT 3033 3034 /* Now let's get back the state we need. */ 3035 HMT_MEDIUM 3036 GET_PACA(r13) 3037 ld r29, HSTATE_DSCR(r13) 3038 mtspr SPRN_DSCR, r29 3039 ld r4, HSTATE_KVM_VCPU(r13) 3040 ld r1, HSTATE_HOST_R1(r13) 3041 ld r2, PACATMSCRATCH(r13) 3042 3043 /* Set the MSR RI since we have our registers back. */ 3044 li r5, MSR_RI 3045 mtmsrd r5, 1 3046 3047 ld r0, PPC_LR_STKOFF(r1) 3048 mtlr r0 3049 blr 3050#endif 3051 3052/* 3053 * We come here if we get any exception or interrupt while we are 3054 * executing host real mode code while in guest MMU context. 3055 * For now just spin, but we should do something better. 3056 */ 3057kvmppc_bad_host_intr: 3058 b . 3059 3060/* 3061 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 3062 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 3063 * r11 has the guest MSR value (in/out) 3064 * r9 has a vcpu pointer (in) 3065 * r0 is used as a scratch register 3066 */ 3067kvmppc_msr_interrupt: 3068 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 3069 cmpwi r0, 2 /* Check if we are in transactional state.. */ 3070 ld r11, VCPU_INTR_MSR(r9) 3071 bne 1f 3072 /* ... if transactional, change to suspended */ 3073 li r0, 1 30741: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 3075 blr 3076 3077/* 3078 * This works around a hardware bug on POWER8E processors, where 3079 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 3080 * performance monitor interrupt. Instead, when we need to have 3081 * an interrupt pending, we have to arrange for a counter to overflow. 
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
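
/*
 * The TAS_SEQCOUNT updates above form a tiny seqlock: the count is
 * bumped to an odd value before the min/max/total update and to an
 * even value afterwards.  A consumer reading the accumulators would
 * do roughly (a sketch; field names follow the TAS_* offsets):
 *
 *	do {
 *		seq = tas->seqcount;
 *		smp_rmb();
 *		total = tas->total;
 *		min = tas->min;
 *		max = tas->max;
 *		smp_rmb();
 *	} while ((seq & 1) || tas->seqcount != seq);
 */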