/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
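	/*
	 * The feature section below works around a POWER8 PMU quirk
	 * (CPU_FTR_PMAO_BUG): if MMCR0 shows PMAO set without PMAO_SYNC,
	 * a performance monitor alert was raised while the counters were
	 * frozen and the interrupt may have been lost, so kvmppc_fix_pmao
	 * is called to regenerate it. (Descriptive note on the check that
	 * follows.)
	 */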
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

BEGIN_FTR_SECTION
	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
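	/*
	 * The lwarx/stwcx. loop above is an atomic OR: in C terms,
	 * roughly "vc->napping_threads |= 1 << ptid" done atomically
	 * (illustrative restatement, not a kernel helper call).
	 */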
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 * This is not used by ISAv3.0B processors.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
BEGIN_FTR_SECTION
	twi	31,0,0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
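	/*
	 * The rlwimi below keeps LPCR_PECE0 set and clears LPCR_PECE1
	 * in the power-saving exit-cause enable bits, i.e. (assuming the
	 * usual PECE meanings) the napping thread still wakes on external
	 * interrupts but not on the decrementer. (Descriptive note on the
	 * bit manipulation that follows.)
	 */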
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, HMIs are ignored even though the subcores
	 * have already exited the guest. The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them. This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/* Clear out SLB if hash */
	bne	cr7, 2f
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync
2:
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
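	/*
	 * The vcore entry/exit word used below packs two per-thread
	 * bitmaps: the entry map in the low 8 bits and the exit map in
	 * bits 8-15, so any value >= 0x100 means some thread has started
	 * exiting. (Descriptive note, inferred from the 0x100 tests here
	 * and the entry/exit comparison in kvmhv_switch_to_host.)
	 */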
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b
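	/*
	 * The sequence above is, in effect, an open-coded
	 * "if (test_bit(cpu, kvm->arch.need_tlb_flush)) { flush all TLB
	 * sets for this LPID; clear_bit(cpu, ...); }", done by hand with
	 * ldarx/stdcx. so that it is safe to run in real mode
	 * (illustrative C restatement).
	 */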
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
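	/*
	 * The VPA yield count is bumped on each transition into and out
	 * of the guest; guests are assumed to use it to detect that a
	 * vcpu was preempted (e.g. when deciding whether to confer cycles
	 * to a lock holder). The exit path below increments it again.
	 * (Descriptive note; how guests consume the count is an
	 * assumption.)
	 */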
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	std	r3,VCPU_DEC(r4)
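	/*
	 * Worked out: DEC = (dec_expires + vc->tb_offset) - tb_now.
	 * dec_expires is kept as a host timebase value, so adding the
	 * vcore's timebase offset converts it to guest TB before the
	 * current (guest) timebase is subtracted (restatement of the
	 * code above).
	 */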
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stw	r9, VCPU_XIVE_PUSHED(r4)
	eieio
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
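	/*
	 * The rldimi above copies the pending-external-interrupt bit into
	 * LPCR[MER] (mediated external request), so that the hardware
	 * itself presents an external interrupt to the guest once it sets
	 * MSR[EE]; the branches below handle the case where we can deliver
	 * an external or decrementer interrupt right now. (Descriptive
	 * note.)
	 */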
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
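	/*
	 * The 0x7fff canary lets kvmppc_hdsi below detect the POWER9 case
	 * where an HDSI is taken but HDSISR was not actually updated: if
	 * the canary value is still present, the instruction is simply
	 * retried. (Descriptive note tying this store to the "Look for
	 * DSISR canary" check in kvmppc_hdsi.)
	 */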
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, we check the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */
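	/*
	 * Illustrative C restatement of the dispatch below (sketch only):
	 *	if (rc == 2) { trap = BOOK3S_INTERRUPT_HV_RM_HARD; exit; }
	 *	else if (rc == 1) exit;
	 *	else if (no other thread is already exiting) deliver to guest;
	 *	else exit;
	 */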
1369 */ 1370 1371 cmpdi r3, 1 1372 ble 1f 1373 1374 /* Return code = 2 */ 1375 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1376 stw r12, VCPU_TRAP(r9) 1377 b guest_exit_cont 1378 13791: /* Return code <= 1 */ 1380 cmpdi r3, 0 1381 bgt guest_exit_cont 1382 1383 /* Return code <= 0 */ 13844: ld r5, HSTATE_KVM_VCORE(r13) 1385 lwz r0, VCORE_ENTRY_EXIT(r5) 1386 cmpwi r0, 0x100 1387 mr r4, r9 1388 blt deliver_guest_interrupt 1389 1390guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1391#ifdef CONFIG_KVM_XICS 1392 /* We are exiting, pull the VP from the XIVE */ 1393 lwz r0, VCPU_XIVE_PUSHED(r9) 1394 cmpwi cr0, r0, 0 1395 beq 1f 1396 li r7, TM_SPC_PULL_OS_CTX 1397 li r6, TM_QW1_OS 1398 mfmsr r0 1399 andi. r0, r0, MSR_IR /* in real mode? */ 1400 beq 2f 1401 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 1402 cmpldi cr0, r10, 0 1403 beq 1f 1404 /* First load to pull the context, we ignore the value */ 1405 eieio 1406 lwzx r11, r7, r10 1407 /* Second load to recover the context state (Words 0 and 1) */ 1408 ldx r11, r6, r10 1409 b 3f 14102: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 1411 cmpldi cr0, r10, 0 1412 beq 1f 1413 /* First load to pull the context, we ignore the value */ 1414 eieio 1415 lwzcix r11, r7, r10 1416 /* Second load to recover the context state (Words 0 and 1) */ 1417 ldcix r11, r6, r10 14183: std r11, VCPU_XIVE_SAVED_STATE(r9) 1419 /* Fixup some of the state for the next load */ 1420 li r10, 0 1421 li r0, 0xff 1422 stw r10, VCPU_XIVE_PUSHED(r9) 1423 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1424 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1425 eieio 14261: 1427#endif /* CONFIG_KVM_XICS */ 1428 /* Save more register state */ 1429 mfdar r6 1430 mfdsisr r7 1431 std r6, VCPU_DAR(r9) 1432 stw r7, VCPU_DSISR(r9) 1433 /* don't overwrite fault_dar/fault_dsisr if HDSI */ 1434 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 1435 beq mc_cont 1436 std r6, VCPU_FAULT_DAR(r9) 1437 stw r7, VCPU_FAULT_DSISR(r9) 1438 1439 /* See if it is a machine check */ 1440 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1441 beq machine_check_realmode 1442mc_cont: 1443#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1444 addi r3, r9, VCPU_TB_RMEXIT 1445 mr r4, r9 1446 bl kvmhv_accumulate_time 1447#endif 1448 1449 mr r3, r12 1450 /* Increment exit count, poke other threads to exit */ 1451 bl kvmhv_commence_exit 1452 nop 1453 ld r9, HSTATE_KVM_VCPU(r13) 1454 lwz r12, VCPU_TRAP(r9) 1455 1456 /* Stop others sending VCPU interrupts to this physical CPU */ 1457 li r0, -1 1458 stw r0, VCPU_CPU(r9) 1459 stw r0, VCPU_THREAD_CPU(r9) 1460 1461 /* Save guest CTRL register, set runlatch to 1 */ 1462 mfspr r6,SPRN_CTRLF 1463 stw r6,VCPU_CTRL(r9) 1464 andi. r0,r6,1 1465 bne 4f 1466 ori r6,r6,1 1467 mtspr SPRN_CTRLT,r6 14684: 1469 /* Check if we are running hash or radix and store it in cr2 */ 1470 ld r5, VCPU_KVM(r9) 1471 lbz r0, KVM_RADIX(r5) 1472 cmpwi cr2,r0,0 1473 1474 /* Read the guest SLB and save it away */ 1475 li r5, 0 1476 bne cr2, 3f /* for radix, save 0 entries */ 1477 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1478 mtctr r0 1479 li r6,0 1480 addi r7,r9,VCPU_SLB 14811: slbmfee r8,r6 1482 andis. 
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
3:	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
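	/*
	 * The rldicl/rotldi pair above is a two-instruction mask:
	 * rotate PSSCR so the guest-visible field group sits at the
	 * bottom, truncate, then rotate back - effectively
	 * "r6 &= PSSCR_GUEST_VIS" (illustrative restatement).
	 */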
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
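	/*
	 * Worked out: "li -1" then "clrrdi ...,10" builds a value with
	 * the top 54 bits set - 64 - 10 = 54 = 9 freeze-condition bits
	 * for each of the 6 counters - which is written to MMCR2 to
	 * freeze the counters without touching MMCR0[PMAO] (restatement
	 * of the workaround described above).
	 */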
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 3f

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
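	/*
	 * Illustrative C sketch of the flush that follows (tlbiel shown
	 * as a pseudo-call; field names as in kvm->arch):
	 *	mtspr(SPRN_LPID, 0);
	 *	for (set = 0; set < kvm->arch.tlb_sets; set++)
	 *		tlbiel(set, pid, RIC=2, PRS=1, R=1);
	 * i.e. one tlbiel per congruence class, targeting the guest PID
	 * under LPID 0.
	 */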
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync

2:	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
	b	4f
#endif /* CONFIG_PPC_RADIX_MMU */

	/* Hash: clear out SLB */
3:	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync
4:
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler would have resync-ed
	 * the TB. Hence it is not required to subtract guest timebase
	 * offset from timebase. So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
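	/*
	 * mtspr(TBU40) writes only the upper 40 bits of the timebase;
	 * the mftb/clrldi/cmpld sequence below detects whether the low
	 * 24 bits happened to wrap while the upper bits were being
	 * updated, and if so bumps the upper 40 bits by one - the same
	 * pattern as the guest-entry path above. (Descriptive note.)
	 */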
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
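/*
 * Illustrative C restatement of the table dispatch below (sketch only;
 * field names as in kvm->arch):
 *	idx = hcall_nr >> 2;
 *	if (hcall_nr >= table_size_in_bytes ||
 *	    !((kvm->arch.enabled_hcalls[idx / 64] >> (idx % 64)) & 1))
 *		punt to the kernel;
 *	handler = hcall_real_table[idx];
 *	if (handler)
 *		r3 = handler(vcpu, arg1, ...);
 */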
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
        ld      r3, VCPU_GPR(R3)(r9)
        andi.   r0, r11, MSR_PR
        /* sc 1 from userspace - reflect to guest syscall */
        bne     sc_1_fast_return
        clrrdi  r3, r3, 2
        cmpldi  r3, hcall_real_table_end - hcall_real_table
        bge     guest_exit_cont
        /* See if this hcall is enabled for in-kernel handling */
        ld      r4, VCPU_KVM(r9)
        srdi    r0, r3, 8               /* r0 = (r3 / 4) >> 6 */
        sldi    r0, r0, 3               /* index into kvm->arch.enabled_hcalls[] */
        add     r4, r4, r0
        ld      r0, KVM_ENABLED_HCALLS(r4)
        rlwinm  r4, r3, 32-2, 0x3f      /* r4 = (r3 / 4) & 0x3f */
        srd     r0, r0, r4
        andi.   r0, r0, 1
        beq     guest_exit_cont
        /* Get pointer to handler, if any, and call it */
        LOAD_REG_ADDR(r4, hcall_real_table)
        lwax    r3, r3, r4
        cmpwi   r3, 0
        beq     guest_exit_cont
        add     r12, r3, r4
        mtctr   r12
        mr      r3, r9                  /* get vcpu pointer */
        ld      r4, VCPU_GPR(R4)(r9)
        bctrl
        cmpdi   r3, H_TOO_HARD
        beq     hcall_real_fallback
        ld      r4, HSTATE_KVM_VCPU(r13)
        std     r3, VCPU_GPR(R3)(r4)
        ld      r10, VCPU_PC(r4)
        ld      r11, VCPU_MSR(r4)
        b       fast_guest_return

sc_1_fast_return:
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_SYSCALL
        bl      kvmppc_msr_interrupt
        mr      r4, r9
        b       fast_guest_return

        /* We've attempted a real mode hcall, but it has been punted back
         * to userspace.  We need to restore some clobbered volatiles
         * before resuming the pass-it-to-qemu path. */
hcall_real_fallback:
        li      r12, BOOK3S_INTERRUPT_SYSCALL
        ld      r9, HSTATE_KVM_VCPU(r13)

        b       guest_exit_cont

        .globl  hcall_real_table
hcall_real_table:
        .long   0               /* 0 - unused */
        .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
        .long   DOTSYM(kvmppc_h_read) - hcall_real_table
        .long   DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
        .long   DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
        .long   0               /* 0x24 - H_SET_SPRG0 */
        .long   DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
        .long   0               /* 0x2c */
        .long   0               /* 0x30 */
        .long   0               /* 0x34 */
        .long   0               /* 0x38 */
        .long   0               /* 0x3c */
        .long   0               /* 0x40 */
        .long   0               /* 0x44 */
        .long   0               /* 0x48 */
        .long   0               /* 0x4c */
        .long   0               /* 0x50 */
        .long   0               /* 0x54 */
        .long   0               /* 0x58 */
        .long   0               /* 0x5c */
        .long   0               /* 0x60 */
#ifdef CONFIG_KVM_XICS
        .long   DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
        .long   0               /* 0x64 - H_EOI */
        .long   0               /* 0x68 - H_CPPR */
        .long   0               /* 0x6c - H_IPI */
        .long   0               /* 0x70 - H_IPOLL */
        .long   0               /* 0x74 - H_XIRR */
#endif
        .long   0               /* 0x78 */
        .long   0               /* 0x7c */
        .long   0               /* 0x80 */
        .long   0               /* 0x84 */
        .long   0               /* 0x88 */
        .long   0               /* 0x8c */
        .long   0               /* 0x90 */
        .long   0               /* 0x94 */
        .long   0               /* 0x98 */
        .long   0               /* 0x9c */
        .long   0               /* 0xa0 */
        .long   0               /* 0xa4 */
        .long   0               /* 0xa8 */
        .long   0               /* 0xac */
        .long   0               /* 0xb0 */
        .long   0               /* 0xb4 */
        .long   0               /* 0xb8 */
        .long   0               /* 0xbc */
        .long   0               /* 0xc0 */
        .long   0               /* 0xc4 */
        .long   0               /* 0xc8 */
        .long   0               /* 0xcc */
        .long   0               /* 0xd0 */
        .long   0               /* 0xd4 */
        .long   0               /* 0xd8 */
        .long   0               /* 0xdc */
        .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
        .long   0               /* 0xe8 */
        .long   0               /* 0xec */
        .long   0               /* 0xf0 */
        .long   0               /* 0xf4 */
        .long   0               /* 0xf8 */
        .long   0               /* 0xfc */
        .long   0               /* 0x100 */
        .long   0               /* 0x104 */
        .long   0               /* 0x108 */
        .long   0               /* 0x10c */
        .long   0               /* 0x110 */
        .long   0               /* 0x114 */
        .long   0               /* 0x118 */
        .long   0               /* 0x11c */
        .long   0               /* 0x120 */
        .long   DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
        .long   0               /* 0x128 */
        .long   0               /* 0x12c */
        .long   0               /* 0x130 */
        .long   DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
        .long   0               /* 0x140 */
        .long   0               /* 0x144 */
        .long   0               /* 0x148 */
        .long   0               /* 0x14c */
        .long   0               /* 0x150 */
        .long   0               /* 0x154 */
        .long   0               /* 0x158 */
        .long   0               /* 0x15c */
        .long   0               /* 0x160 */
        .long   0               /* 0x164 */
        .long   0               /* 0x168 */
        .long   0               /* 0x16c */
        .long   0               /* 0x170 */
        .long   0               /* 0x174 */
        .long   0               /* 0x178 */
        .long   0               /* 0x17c */
        .long   0               /* 0x180 */
        .long   0               /* 0x184 */
        .long   0               /* 0x188 */
        .long   0               /* 0x18c */
        .long   0               /* 0x190 */
        .long   0               /* 0x194 */
        .long   0               /* 0x198 */
        .long   0               /* 0x19c */
        .long   0               /* 0x1a0 */
        .long   0               /* 0x1a4 */
        .long   0               /* 0x1a8 */
        .long   0               /* 0x1ac */
        .long   0               /* 0x1b0 */
        .long   0               /* 0x1b4 */
        .long   0               /* 0x1b8 */
        .long   0               /* 0x1bc */
        .long   0               /* 0x1c0 */
        .long   0               /* 0x1c4 */
        .long   0               /* 0x1c8 */
        .long   0               /* 0x1cc */
        .long   0               /* 0x1d0 */
        .long   0               /* 0x1d4 */
        .long   0               /* 0x1d8 */
        .long   0               /* 0x1dc */
        .long   0               /* 0x1e0 */
        .long   0               /* 0x1e4 */
        .long   0               /* 0x1e8 */
        .long   0               /* 0x1ec */
        .long   0               /* 0x1f0 */
        .long   0               /* 0x1f4 */
        .long   0               /* 0x1f8 */
        .long   0               /* 0x1fc */
        .long   0               /* 0x200 */
        .long   0               /* 0x204 */
        .long   0               /* 0x208 */
        .long   0               /* 0x20c */
        .long   0               /* 0x210 */
        .long   0               /* 0x214 */
        .long   0               /* 0x218 */
        .long   0               /* 0x21c */
        .long   0               /* 0x220 */
        .long   0               /* 0x224 */
        .long   0               /* 0x228 */
        .long   0               /* 0x22c */
        .long   0               /* 0x230 */
        .long   0               /* 0x234 */
        .long   0               /* 0x238 */
        .long   0               /* 0x23c */
        .long   0               /* 0x240 */
        .long   0               /* 0x244 */
        .long   0               /* 0x248 */
        .long   0               /* 0x24c */
        .long   0               /* 0x250 */
        .long   0               /* 0x254 */
        .long   0               /* 0x258 */
        .long   0               /* 0x25c */
        .long   0               /* 0x260 */
        .long   0               /* 0x264 */
        .long   0               /* 0x268 */
        .long   0               /* 0x26c */
        .long   0               /* 0x270 */
        .long   0               /* 0x274 */
        .long   0               /* 0x278 */
        .long   0               /* 0x27c */
        .long   0               /* 0x280 */
        .long   0               /* 0x284 */
        .long   0               /* 0x288 */
        .long   0               /* 0x28c */
        .long   0               /* 0x290 */
        .long   0               /* 0x294 */
        .long   0               /* 0x298 */
        .long   0               /* 0x29c */
        .long   0               /* 0x2a0 */
        .long   0               /* 0x2a4 */
        .long   0               /* 0x2a8 */
        .long   0               /* 0x2ac */
        .long   0               /* 0x2b0 */
        .long   0               /* 0x2b4 */
        .long   0               /* 0x2b8 */
        .long   0               /* 0x2bc */
        .long   0               /* 0x2c0 */
        .long   0               /* 0x2c4 */
        .long   0               /* 0x2c8 */
        .long   0               /* 0x2cc */
        .long   0               /* 0x2d0 */
        .long   0               /* 0x2d4 */
        .long   0               /* 0x2d8 */
        .long   0               /* 0x2dc */
        .long   0               /* 0x2e0 */
        .long   0               /* 0x2e4 */
        .long   0               /* 0x2e8 */
        .long   0               /* 0x2ec */
        .long   0               /* 0x2f0 */
        .long   0               /* 0x2f4 */
        .long   0               /* 0x2f8 */
#ifdef CONFIG_KVM_XICS
        .long   DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
        .long   0               /* 0x2fc - H_XIRR_X */
#endif
        .long   DOTSYM(kvmppc_h_random) - hcall_real_table
        .globl  hcall_real_table_end
hcall_real_table_end:
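
/*
 * Note: each table entry is a 32-bit offset of the handler from
 * hcall_real_table, loaded (sign-extended) by the lwax in
 * hcall_try_real_mode above; a zero entry means the hcall is not
 * handled in real mode.
 */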
_GLOBAL(kvmppc_h_set_xdabr)
        andi.   r0, r5, DABRX_USER | DABRX_KERNEL
        beq     6f
        li      r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
        andc.   r0, r5, r0
        beq     3f
6:      li      r3, H_PARAMETER
        blr

_GLOBAL(kvmppc_h_set_dabr)
        li      r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
        b       2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r4, VCPU_DABR(r3)
        stw     r5, VCPU_DABRX(r3)
        mtspr   SPRN_DABRX, r5
        /* Work around P7 bug where DABR can get corrupted on mtspr */
1:      mtspr   SPRN_DABR, r4
        mfspr   r5, SPRN_DABR
        cmpd    r4, r5
        bne     1b
        isync
        li      r3, 0
        blr

        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:      rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
        rlwimi  r5, r4, 2, DAWRX_WT
        clrrdi  r4, r4, 3
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
        mtspr   SPRN_DAWR, r4
        mtspr   SPRN_DAWRX, r5
        li      r3, 0
        blr

_GLOBAL(kvmppc_h_cede)          /* r3 = vcpu pointer, r11 = msr, r13 = paca */
        ori     r11, r11, MSR_EE
        std     r11, VCPU_MSR(r3)
        li      r0, 1
        stb     r0, VCPU_CEDED(r3)
        sync                    /* order setting ceded vs. testing prodded */
        lbz     r5, VCPU_PRODDED(r3)
        cmpwi   r5, 0
        bne     kvm_cede_prodded
        li      r12, 0          /* set trap to 0 to say hcall is handled */
        stw     r12, VCPU_TRAP(r3)
        li      r0, H_SUCCESS
        std     r0, VCPU_GPR(R3)(r3)

        /*
         * Set our bit in the bitmask of napping threads unless all the
         * other threads are already napping, in which case we send this
         * up to the host.
         */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, HSTATE_PTID(r13)
        lwz     r8, VCORE_ENTRY_EXIT(r5)
        clrldi  r8, r8, 56
        li      r0, 1
        sld     r0, r0, r6
        addi    r6, r5, VCORE_NAPPING_THREADS
31:     lwarx   r4, 0, r6
        or      r4, r4, r0
        cmpw    r4, r8
        beq     kvm_cede_exit
        stwcx.  r4, 0, r6
        bne     31b
        /* order napping_threads update vs testing entry_exit_map */
        isync
        li      r0, NAPPING_CEDE
        stb     r0, HSTATE_NAPPING(r13)
        lwz     r7, VCORE_ENTRY_EXIT(r5)
        cmpwi   r7, 0x100
        bge     33f             /* another thread already exiting */
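
        /*
         * Note: everything from here down to the nap sequence stashes
         * guest state in the vcpu struct - non-volatile GPRs,
         * FP/VMX/VSX, TM state and the decrementer expiry - so that
         * after waking we can either re-enter the guest or exit
         * cleanly to the host.
         */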
/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r3)
        std     r15, VCPU_GPR(R15)(r3)
        std     r16, VCPU_GPR(R16)(r3)
        std     r17, VCPU_GPR(R17)(r3)
        std     r18, VCPU_GPR(R18)(r3)
        std     r19, VCPU_GPR(R19)(r3)
        std     r20, VCPU_GPR(R20)(r3)
        std     r21, VCPU_GPR(R21)(r3)
        std     r22, VCPU_GPR(R22)(r3)
        std     r23, VCPU_GPR(R23)(r3)
        std     r24, VCPU_GPR(R24)(r3)
        std     r25, VCPU_GPR(R25)(r3)
        std     r26, VCPU_GPR(R26)(r3)
        std     r27, VCPU_GPR(R27)(r3)
        std     r28, VCPU_GPR(R28)(r3)
        std     r29, VCPU_GPR(R29)(r3)
        std     r30, VCPU_GPR(R30)(r3)
        std     r31, VCPU_GPR(R31)(r3)

        /* save FP state */
        bl      kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
        /*
         * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
         */
        ld      r9, HSTATE_KVM_VCPU(r13)
        bl      kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

        /*
         * Set DEC to the smaller of DEC and HDEC, so that we wake
         * no later than the end of our timeslice (HDEC interrupts
         * don't wake us from nap).
         */
        mfspr   r3, SPRN_DEC
        mfspr   r4, SPRN_HDEC
        mftb    r5
BEGIN_FTR_SECTION
        /* On P9 check whether the guest has large decrementer mode enabled */
        ld      r6, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_LPCR(r6)
        andis.  r6, r6, LPCR_LD@h
        bne     68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        extsw   r3, r3
68:     EXTEND_HDEC(r4)
        cmpd    r3, r4
        ble     67f
        mtspr   SPRN_DEC, r4
67:
        /* save expiry time of guest decrementer */
        add     r3, r3, r5
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        subf    r3, r6, r3      /* convert to host TB value */
        std     r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        ld      r4, HSTATE_KVM_VCPU(r13)
        addi    r3, r4, VCPU_TB_CEDE
        bl      kvmhv_accumulate_time
#endif

        lis     r3, LPCR_PECEDP@h       /* Do wake on privileged doorbell */

        /*
         * Take a nap until a decrementer or external or doorbell interrupt
         * occurs, with PECE1 and PECE0 set in LPCR.
         * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
         * Also clear the runlatch bit before napping.
         */
kvm_do_nap:
        mfspr   r0, SPRN_CTRLF
        clrrdi  r0, r0, 1
        mtspr   SPRN_CTRLT, r0

BEGIN_FTR_SECTION
        li      r0, 1
        stb     r0, HSTATE_HWTHREAD_REQ(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
        mfspr   r5, SPRN_LPCR
        ori     r5, r5, LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
        ori     r5, r5, LPCR_PECEDH
        rlwimi  r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:               /* desired LPCR value in r5 */
BEGIN_FTR_SECTION
        /*
         * PSSCR bits:  exit criterion = 1 (wakeup based on LPCR at sreset)
         *              enable state loss = 1 (allow SMT mode switch)
         *              requested level = 0 (just stop dispatching)
         */
        lis     r3, (PSSCR_EC | PSSCR_ESL)@h
        mtspr   SPRN_PSSCR, r3
        /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
        li      r4, LPCR_PECE_HVEE@higher
        sldi    r4, r4, 32
        or      r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        mtspr   SPRN_LPCR, r5
        isync
        li      r0, 0
        std     r0, HSTATE_SCRATCH0(r13)
        ptesync
        ld      r0, HSTATE_SCRATCH0(r13)
1:      cmpd    r0, r0
        bne     1b
BEGIN_FTR_SECTION
        nap
FTR_SECTION_ELSE
        PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
        b       .
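
        /*
         * Note: 33: is reached when another thread was found to be
         * exiting while we were preparing to nap; recover the vcpu
         * pointer (still in r3 from kvmppc_h_cede), indicate no wake
         * reason and no trap, and join the code below that clears our
         * napping bit.
         */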
33:     mr      r4, r3
        li      r3, 0
        li      r12, 0
        b       34f

kvm_end_cede:
        /* get vcpu pointer */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Woken by external or decrementer interrupt */
        ld      r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMINTR
        bl      kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
        /*
         * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
         */
        bl      kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

        /* load up FP state */
        bl      kvmppc_load_fp

        /* Restore guest decrementer */
        ld      r3, VCPU_DEC_EXPIRES(r4)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        add     r3, r3, r6      /* convert host TB to guest TB value */
        mftb    r7
        subf    r3, r7, r3
        mtspr   SPRN_DEC, r3

        /* Load NV GPRS */
        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)

        /* Check the wake reason in SRR1 to see why we got here */
        bl      kvmppc_check_wake_reason

        /*
         * Restore volatile registers since we could have called a
         * C routine in kvmppc_check_wake_reason.
         *      r4 = VCPU
         * r3 tells us whether we need to return to host or not
         * WARNING: it gets checked further down:
         * should not modify r3 until this check is done.
         */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* clear our bit in vcore->napping_threads */
34:     ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r7, HSTATE_PTID(r13)
        li      r0, 1
        sld     r0, r0, r7
        addi    r6, r5, VCORE_NAPPING_THREADS
32:     lwarx   r7, 0, r6
        andc    r7, r7, r0
        stwcx.  r7, 0, r6
        bne     32b
        li      r0, 0
        stb     r0, HSTATE_NAPPING(r13)

        /* See if the wake reason saved in r3 means we need to exit */
        stw     r12, VCPU_TRAP(r4)
        mr      r9, r4
        cmpdi   r3, 0
        bgt     guest_exit_cont

        /* see if any other thread is already exiting */
        lwz     r0, VCORE_ENTRY_EXIT(r5)
        cmpwi   r0, 0x100
        bge     guest_exit_cont

        b       kvmppc_cede_reentry     /* if not go back to guest */

        /* handle cede when the vcpu has already been prodded */
kvm_cede_prodded:
        li      r0, 0
        stb     r0, VCPU_PRODDED(r3)
        sync                    /* order testing prodded vs. clearing ceded */
        stb     r0, VCPU_CEDED(r3)
        li      r3, H_SUCCESS
        blr

        /* we've ceded but we want to give control to the host */
kvm_cede_exit:
        ld      r9, HSTATE_KVM_VCPU(r13)
        b       guest_exit_cont

        /* Try to handle a machine check in real mode */
machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      kvmppc_realmode_machine_check
        nop
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        /*
         * For a FWNMI-capable guest, deliver all MCE errors
         * (handled or unhandled) by exiting the guest with a
         * KVM_EXIT_NMI exit reason.  This approach injects machine
         * check errors into the guest address space, with additional
         * information in the form of an RTAS event, enabling the
         * guest kernel to handle such errors suitably.
         *
         * For a guest that is not FWNMI capable (old QEMU), fall back
         * to the old behaviour for backward compatibility:
         * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
         * through a machine check interrupt (set HSRR0 to 0x200).
         * For handled errors (non-fatal), just go back to guest
         * execution with the current HSRR0.
         * If we receive a machine check with MSR(RI=0), deliver it to
         * the guest as a machine check, causing the guest to crash.
         */
        ld      r11, VCPU_MSR(r9)
        rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
        bne     mc_cont                 /* if so, exit to host */
        /* Check if guest is capable of handling NMI exit */
        ld      r10, VCPU_KVM(r9)
        lbz     r10, KVM_FWNMI(r10)
        cmpdi   r10, 1                  /* FWNMI capable? */
        beq     mc_cont                 /* if so, exit with KVM_EXIT_NMI. */

        /* if not, fall through for backward compatibility. */
        andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
        beq     1f                      /* Deliver a machine check to guest */
        ld      r10, VCPU_PC(r9)
        cmpdi   r3, 0                   /* Did we handle the MCE? */
        bne     2f                      /* Continue guest execution. */
        /* If not, deliver a machine check.  SRR0/1 are already set */
1:      li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
        bl      kvmppc_msr_interrupt
2:      b       fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *      0 if nothing needs to be done
 *      1 if something happened that needs to be handled by the host
 *      -1 if there was a guest wakeup (IPI or msgsnd)
 *      -2 if we handled a PCI passthrough interrupt (returned by
 *              kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
        mfspr   r6, SPRN_SRR1
BEGIN_FTR_SECTION
        rlwinm  r6, r6, 45-31, 0xf      /* extract wake reason field (P8) */
FTR_SECTION_ELSE
        rlwinm  r6, r6, 45-31, 0xe      /* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   r6, 8                   /* was it an external interrupt? */
        beq     7f                      /* if so, see what it was */
        li      r3, 0
        li      r12, 0
        cmpwi   r6, 6                   /* was it the decrementer? */
        beq     0f
BEGIN_FTR_SECTION
        cmpwi   r6, 5                   /* privileged doorbell? */
        beq     0f
        cmpwi   r6, 3                   /* hypervisor doorbell? */
        beq     3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   r6, 0xa                 /* Hypervisor maintenance? */
        beq     4f
        li      r3, 1                   /* anything else, return 1 */
0:      blr

        /* hypervisor doorbell */
3:      li      r12, BOOK3S_INTERRUPT_H_DOORBELL

        /*
         * Clear the doorbell as we will invoke the handler
         * explicitly in the guest exit path.
         */
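        /*
         * Note: msgclr takes the doorbell message type from rB; the
         * shift below positions PPC_DBELL_SERVER in the field that
         * the instruction expects.
         */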
        lis     r6, (PPC_DBELL_SERVER << (63-36))@h
        PPC_MSGCLR(6)
        /* see if it's a host IPI */
        li      r3, 1
BEGIN_FTR_SECTION
        PPC_MSGSYNC
        lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        bnelr
        /* if not, return -1 */
        li      r3, -1
        blr

        /* Woken up due to Hypervisor maintenance interrupt */
4:      li      r12, BOOK3S_INTERRUPT_HMI
        li      r3, 1
        blr

        /* external interrupt - create a stack frame so we can call C */
7:      mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -PPC_MIN_STKFRM(r1)
        bl      kvmppc_read_intr
        nop
        li      r12, BOOK3S_INTERRUPT_EXTERNAL
        cmpdi   r3, 1
        ble     1f

        /*
         * Return code of 2 means PCI passthrough interrupt, but
         * we need to return back to host to complete handling the
         * interrupt.  Trap reason is expected in r12 by guest
         * exit code.
         */
        li      r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
        ld      r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
        addi    r1, r1, PPC_MIN_STKFRM
        mtlr    r0
        blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
        mflr    r30
        mr      r31, r3
        mfmsr   r5
        ori     r8, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        mtmsrd  r8
        addi    r3, r3, VCPU_FPRS
        bl      store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        addi    r3, r31, VCPU_VRS
        bl      store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        mfspr   r6, SPRN_VRSAVE
        stw     r6, VCPU_VRSAVE(r31)
        mtlr    r30
        blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
        mflr    r30
        mr      r31, r4
        mfmsr   r9
        ori     r8, r9, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        mtmsrd  r8
        addi    r3, r4, VCPU_FPRS
        bl      load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        addi    r3, r31, VCPU_VRS
        bl      load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        lwz     r7, VCPU_VRSAVE(r31)
        mtspr   SPRN_VRSAVE, r7
        mtlr    r30
        mr      r4, r31
        blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)

        /* Turn on TM. */
        mfmsr   r8
        li      r0, 1
        rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
        mtmsrd  r8

        ld      r5, VCPU_MSR(r9)
        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beq     1f      /* TM not active in guest. */

        std     r1, HSTATE_HOST_R1(r13)
        li      r3, TM_CAUSE_KVM_RESCHED

        /* Clear the MSR RI since r1, r13 are all going to be foobar. */
        li      r5, 0
        mtmsrd  r5, 1

        /* All GPRs are volatile at this point. */
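        /*
         * Note: treclaim. aborts the transaction and makes the
         * checkpointed register values the live register values,
         * which is why everything below must be saved into the vcpu
         * struct; r3 carries the failure cause (TM_CAUSE_KVM_RESCHED,
         * set above).
         */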
        TRECLAIM(R3)

        /* Temporarily store r13 and r9 so we have some regs to play with */
        SET_SCRATCH0(r13)
        GET_PACA(r13)
        std     r9, PACATMSCRATCH(r13)
        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Get a few more GPRs free. */
        std     r29, VCPU_GPRS_TM(29)(r9)
        std     r30, VCPU_GPRS_TM(30)(r9)
        std     r31, VCPU_GPRS_TM(31)(r9)

        /* Save away PPR and DSCR soon so we don't run with user values. */
        mfspr   r31, SPRN_PPR
        HMT_MEDIUM
        mfspr   r30, SPRN_DSCR
        ld      r29, HSTATE_DSCR(r13)
        mtspr   SPRN_DSCR, r29

        /* Save all but r9, r13 & r29-r31 */
        reg = 0
        .rept   29
        .if (reg != 9) && (reg != 13)
        std     reg, VCPU_GPRS_TM(reg)(r9)
        .endif
        reg = reg + 1
        .endr
        /* ... now save r13 */
        GET_SCRATCH0(r4)
        std     r4, VCPU_GPRS_TM(13)(r9)
        /* ... and save r9 */
        ld      r4, PACATMSCRATCH(r13)
        std     r4, VCPU_GPRS_TM(9)(r9)

        /* Reload stack pointer and TOC. */
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r2, PACATOC(r13)

        /* Set MSR RI now we have r1 and r13 back. */
        li      r5, MSR_RI
        mtmsrd  r5, 1

        /* Save away checkpointed SPRs. */
        std     r31, VCPU_PPR_TM(r9)
        std     r30, VCPU_DSCR_TM(r9)
        mflr    r5
        mfcr    r6
        mfctr   r7
        mfspr   r8, SPRN_AMR
        mfspr   r10, SPRN_TAR
        mfxer   r11
        std     r5, VCPU_LR_TM(r9)
        stw     r6, VCPU_CR_TM(r9)
        std     r7, VCPU_CTR_TM(r9)
        std     r8, VCPU_AMR_TM(r9)
        std     r10, VCPU_TAR_TM(r9)
        std     r11, VCPU_XER_TM(r9)

        /* Restore r12 as trap number. */
        lwz     r12, VCPU_TRAP(r9)

        /* Save FP/VSX. */
        addi    r3, r9, VCPU_FPRS_TM
        bl      store_fp_state
        addi    r3, r9, VCPU_VRS_TM
        bl      store_vr_state
        mfspr   r6, SPRN_VRSAVE
        stw     r6, VCPU_VRSAVE_TM(r9)
1:
        /*
         * We need to save these SPRs after the treclaim so that the software
         * error code is recorded correctly in the TEXASR.  Also the user may
         * change these outside of a transaction, so they must always be
         * context switched.
         */
        mfspr   r5, SPRN_TFHAR
        mfspr   r6, SPRN_TFIAR
        mfspr   r7, SPRN_TEXASR
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
        std     r7, VCPU_TEXASR(r9)

        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)

        /* Turn on TM/FP/VSX/VMX so we can restore them. */
        mfmsr   r5
        li      r6, MSR_TM >> 32
        sldi    r6, r6, 32
        or      r5, r5, r6
        ori     r5, r5, MSR_FP
        oris    r5, r5, (MSR_VEC | MSR_VSX)@h
        mtmsrd  r5

        /*
         * The user may change these outside of a transaction, so they must
         * always be context switched.
         */
        ld      r5, VCPU_TFHAR(r4)
        ld      r6, VCPU_TFIAR(r4)
        ld      r7, VCPU_TEXASR(r4)
        mtspr   SPRN_TFHAR, r5
        mtspr   SPRN_TFIAR, r6
        mtspr   SPRN_TEXASR, r7

        ld      r5, VCPU_MSR(r4)
        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beqlr           /* TM not active in guest */
        std     r1, HSTATE_HOST_R1(r13)

        /*
         * Make sure the failure summary is set; otherwise we'll take a
         * program check when we trechkpt.  It's possible that this was
         * not set on a kvmppc_set_one_reg() call, but we shouldn't let
         * that crash the host.
         */
        oris    r7, r7, (TEXASR_FS)@h
        mtspr   SPRN_TEXASR, r7

        /*
         * We need to load up the checkpointed state for the guest.
         * We need to do this early as it will blow away any GPRs, VSRs and
         * some SPRs.
         */

        mr      r31, r4
        addi    r3, r31, VCPU_FPRS_TM
        bl      load_fp_state
        addi    r3, r31, VCPU_VRS_TM
        bl      load_vr_state
        mr      r4, r31
        lwz     r7, VCPU_VRSAVE_TM(r4)
        mtspr   SPRN_VRSAVE, r7

        ld      r5, VCPU_LR_TM(r4)
        lwz     r6, VCPU_CR_TM(r4)
        ld      r7, VCPU_CTR_TM(r4)
        ld      r8, VCPU_AMR_TM(r4)
        ld      r9, VCPU_TAR_TM(r4)
        ld      r10, VCPU_XER_TM(r4)
        mtlr    r5
        mtcr    r6
        mtctr   r7
        mtspr   SPRN_AMR, r8
        mtspr   SPRN_TAR, r9
        mtxer   r10

        /*
         * Load up PPR and DSCR values but don't put them in the actual SPRs
         * till the last moment to avoid running with userspace PPR and DSCR
         * for too long.
         */
        ld      r29, VCPU_DSCR_TM(r4)
        ld      r30, VCPU_PPR_TM(r4)

        std     r2, PACATMSCRATCH(r13)  /* Save TOC */

        /* Clear the MSR RI since r1, r13 are all going to be foobar. */
        li      r5, 0
        mtmsrd  r5, 1

        /* Load GPRs r0-r28 */
        reg = 0
        .rept   29
        ld      reg, VCPU_GPRS_TM(reg)(r31)
        reg = reg + 1
        .endr

        mtspr   SPRN_DSCR, r29
        mtspr   SPRN_PPR, r30

        /* Load final GPRs */
        ld      r29, VCPU_GPRS_TM(29)(r31)
        ld      r30, VCPU_GPRS_TM(30)(r31)
        ld      r31, VCPU_GPRS_TM(31)(r31)

        /* TM checkpointed state is now setup.  All GPRs are now volatile. */
        TRECHKPT

        /* Now let's get back the state we need. */
        HMT_MEDIUM
        GET_PACA(r13)
        ld      r29, HSTATE_DSCR(r13)
        mtspr   SPRN_DSCR, r29
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r2, PACATMSCRATCH(r13)

        /* Set the MSR RI since we have our registers back. */
        li      r5, MSR_RI
        mtmsrd  r5, 1

        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
        b       .

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
        rldicl  r0, r11, 64 - MSR_TS_S_LG, 62
        cmpwi   r0, 2           /* Check if we are in transactional state.. */
        ld      r11, VCPU_INTR_MSR(r9)
        bne     1f
        /* ... if transactional, change to suspended */
        li      r0, 1
1:      rldimi  r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
        blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
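/*
 * Note: the routine below arranges that overflow by programming PMC6
 * to 0x7fffffff, one count below overflow, with MMCR0 set so that the
 * overflow raises the interrupt (PMXE) and freezes the counters
 * (FCECE).
 */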
kvmppc_fix_pmao:
        li      r3, 0
        mtspr   SPRN_MMCR2, r3
        lis     r3, (MMCR0_PMXE | MMCR0_FCECE)@h
        ori     r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
        mtspr   SPRN_MMCR0, r3
        lis     r3, 0x7fff
        ori     r3, r3, 0xffff
        mtspr   SPRN_PMC6, r3
        isync
        blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, VCORE_IN_GUEST(r5)
        cmpwi   r6, 0
        beq     5f                      /* if in guest, need to */
        ld      r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
5:      mftb    r5
        subf    r5, r6, r5
        std     r3, VCPU_CUR_ACTIVITY(r4)
        std     r5, VCPU_ACTIVITY_START(r4)
        blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r8, VCORE_IN_GUEST(r5)
        cmpwi   r8, 0
        beq     4f                      /* if in guest, need to */
        ld      r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
4:      ld      r5, VCPU_CUR_ACTIVITY(r4)
        ld      r6, VCPU_ACTIVITY_START(r4)
        std     r3, VCPU_CUR_ACTIVITY(r4)
        mftb    r7
        subf    r7, r8, r7
        std     r7, VCPU_ACTIVITY_START(r4)
        cmpdi   r5, 0
        beqlr
        subf    r3, r6, r7
        ld      r8, TAS_SEQCOUNT(r5)
        cmpdi   r8, 0
        addi    r8, r8, 1
        std     r8, TAS_SEQCOUNT(r5)
        lwsync
        ld      r7, TAS_TOTAL(r5)
        add     r7, r7, r3
        std     r7, TAS_TOTAL(r5)
        ld      r6, TAS_MIN(r5)
        ld      r7, TAS_MAX(r5)
        beq     3f
        cmpd    r3, r6
        bge     1f
3:      std     r3, TAS_MIN(r5)
1:      cmpd    r3, r7
        ble     2f
        std     r3, TAS_MAX(r5)
2:      lwsync
        addi    r8, r8, 1
        std     r8, TAS_SEQCOUNT(r5)
        blr
#endif