/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
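	 * Note: the checks below rely on kvmppc_check_wake_reason leaving a
	 * non-negative value in r3 when the wakeup is not a "run a guest"
	 * IPI (in which case we head for kvm_no_guest), and a negative
	 * value when we were woken to enter the guest.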
368 */ 369 370 /* Check the wake reason in SRR1 to see why we got here */ 371 bl kvmppc_check_wake_reason 372 /* 373 * kvmppc_check_wake_reason could invoke a C routine, but we 374 * have no volatile registers to restore when we return. 375 */ 376 377 cmpdi r3, 0 378 bge kvm_no_guest 379 380 /* get vcore pointer, NULL if we have nothing to run */ 381 ld r5,HSTATE_KVM_VCORE(r13) 382 cmpdi r5,0 383 /* if we have no vcore to run, go back to sleep */ 384 beq kvm_no_guest 385 386kvm_secondary_got_guest: 387 388 /* Set HSTATE_DSCR(r13) to something sensible */ 389 ld r6, PACA_DSCR_DEFAULT(r13) 390 std r6, HSTATE_DSCR(r13) 391 392 /* On thread 0 of a subcore, set HDEC to max */ 393 lbz r4, HSTATE_PTID(r13) 394 cmpwi r4, 0 395 bne 63f 396 LOAD_REG_ADDR(r6, decrementer_max) 397 ld r6, 0(r6) 398 mtspr SPRN_HDEC, r6 399 /* and set per-LPAR registers, if doing dynamic micro-threading */ 400 ld r6, HSTATE_SPLIT_MODE(r13) 401 cmpdi r6, 0 402 beq 63f 403BEGIN_FTR_SECTION 404 ld r0, KVM_SPLIT_RPR(r6) 405 mtspr SPRN_RPR, r0 406 ld r0, KVM_SPLIT_PMMAR(r6) 407 mtspr SPRN_PMMAR, r0 408 ld r0, KVM_SPLIT_LDBAR(r6) 409 mtspr SPRN_LDBAR, r0 410 isync 411FTR_SECTION_ELSE 412 /* On P9 we use the split_info for coordinating LPCR changes */ 413 lwz r4, KVM_SPLIT_DO_SET(r6) 414 cmpwi r4, 0 415 beq 1f 416 mr r3, r6 417 bl kvmhv_p9_set_lpcr 418 nop 4191: 420ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 42163: 422 /* Order load of vcpu after load of vcore */ 423 lwsync 424 ld r4, HSTATE_KVM_VCPU(r13) 425 bl kvmppc_hv_entry 426 427 /* Back from the guest, go back to nap */ 428 /* Clear our vcpu and vcore pointers so we don't come back in early */ 429 li r0, 0 430 std r0, HSTATE_KVM_VCPU(r13) 431 /* 432 * Once we clear HSTATE_KVM_VCORE(r13), the code in 433 * kvmppc_run_core() is going to assume that all our vcpu 434 * state is visible in memory. This lwsync makes sure 435 * that that is true. 436 */ 437 lwsync 438 std r0, HSTATE_KVM_VCORE(r13) 439 440 /* 441 * All secondaries exiting guest will fall through this path. 442 * Before proceeding, just check for HMI interrupt and 443 * invoke opal hmi handler. By now we are sure that the 444 * primary thread on this core/subcore has already made partition 445 * switch/TB resync and we are good to call opal hmi handler. 446 */ 447 cmpwi r12, BOOK3S_INTERRUPT_HMI 448 bne kvm_no_guest 449 450 li r3,0 /* NULL argument */ 451 bl hmi_exception_realmode 452/* 453 * At this point we have finished executing in the guest. 454 * We need to wait for hwthread_req to become zero, since 455 * we may not turn on the MMU while hwthread_req is non-zero. 456 * While waiting we also need to check if we get given a vcpu to run. 457 */ 458kvm_no_guest: 459 lbz r3, HSTATE_HWTHREAD_REQ(r13) 460 cmpwi r3, 0 461 bne 53f 462 HMT_MEDIUM 463 li r0, KVM_HWTHREAD_IN_KERNEL 464 stb r0, HSTATE_HWTHREAD_STATE(r13) 465 /* need to recheck hwthread_req after a barrier, to avoid race */ 466 sync 467 lbz r3, HSTATE_HWTHREAD_REQ(r13) 468 cmpwi r3, 0 469 bne 54f 470/* 471 * We jump to pnv_wakeup_loss, which will return to the caller 472 * of power7_nap in the powernv cpu offline loop. The value we 473 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss 474 * requires SRR1 in r12. 
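 * (The rlwimi on LPCR below leaves PECE0 set and PECE1 clear, i.e. the
 * offlined thread should wake on external interrupts but not on the
 * decrementer; r3 is then zeroed to give power7_nap a 0 return value
 * and SRR1 is copied into r12 as pnv_wakeup_loss expects.)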
475 */ 476 li r3, LPCR_PECE0 477 mfspr r4, SPRN_LPCR 478 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 479 mtspr SPRN_LPCR, r4 480 li r3, 0 481 mfspr r12,SPRN_SRR1 482 b pnv_wakeup_loss 483 48453: HMT_LOW 485 ld r5, HSTATE_KVM_VCORE(r13) 486 cmpdi r5, 0 487 bne 60f 488 ld r3, HSTATE_SPLIT_MODE(r13) 489 cmpdi r3, 0 490 beq kvm_no_guest 491 lwz r0, KVM_SPLIT_DO_SET(r3) 492 cmpwi r0, 0 493 bne kvmhv_do_set 494 lwz r0, KVM_SPLIT_DO_RESTORE(r3) 495 cmpwi r0, 0 496 bne kvmhv_do_restore 497 lbz r0, KVM_SPLIT_DO_NAP(r3) 498 cmpwi r0, 0 499 beq kvm_no_guest 500 HMT_MEDIUM 501 b kvm_unsplit_nap 50260: HMT_MEDIUM 503 b kvm_secondary_got_guest 504 50554: li r0, KVM_HWTHREAD_IN_KVM 506 stb r0, HSTATE_HWTHREAD_STATE(r13) 507 b kvm_no_guest 508 509kvmhv_do_set: 510 /* Set LPCR, LPIDR etc. on P9 */ 511 HMT_MEDIUM 512 bl kvmhv_p9_set_lpcr 513 nop 514 b kvm_no_guest 515 516kvmhv_do_restore: 517 HMT_MEDIUM 518 bl kvmhv_p9_restore_lpcr 519 nop 520 b kvm_no_guest 521 522/* 523 * Here the primary thread is trying to return the core to 524 * whole-core mode, so we need to nap. 525 */ 526kvm_unsplit_nap: 527 /* 528 * When secondaries are napping in kvm_unsplit_nap() with 529 * hwthread_req = 1, HMI goes ignored even though subcores are 530 * already exited the guest. Hence HMI keeps waking up secondaries 531 * from nap in a loop and secondaries always go back to nap since 532 * no vcore is assigned to them. This makes impossible for primary 533 * thread to get hold of secondary threads resulting into a soft 534 * lockup in KVM path. 535 * 536 * Let us check if HMI is pending and handle it before we go to nap. 537 */ 538 cmpwi r12, BOOK3S_INTERRUPT_HMI 539 bne 55f 540 li r3, 0 /* NULL argument */ 541 bl hmi_exception_realmode 54255: 543 /* 544 * Ensure that secondary doesn't nap when it has 545 * its vcore pointer set. 
546 */ 547 sync /* matches smp_mb() before setting split_info.do_nap */ 548 ld r0, HSTATE_KVM_VCORE(r13) 549 cmpdi r0, 0 550 bne kvm_no_guest 551 /* clear any pending message */ 552BEGIN_FTR_SECTION 553 lis r6, (PPC_DBELL_SERVER << (63-36))@h 554 PPC_MSGCLR(6) 555END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 556 /* Set kvm_split_mode.napped[tid] = 1 */ 557 ld r3, HSTATE_SPLIT_MODE(r13) 558 li r0, 1 559 lbz r4, HSTATE_TID(r13) 560 addi r4, r4, KVM_SPLIT_NAPPED 561 stbx r0, r3, r4 562 /* Check the do_nap flag again after setting napped[] */ 563 sync 564 lbz r0, KVM_SPLIT_DO_NAP(r3) 565 cmpwi r0, 0 566 beq 57f 567 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 568 mfspr r5, SPRN_LPCR 569 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) 570 b kvm_nap_sequence 571 57257: li r0, 0 573 stbx r0, r3, r4 574 b kvm_no_guest 575 576/****************************************************************************** 577 * * 578 * Entry code * 579 * * 580 *****************************************************************************/ 581 582.global kvmppc_hv_entry 583kvmppc_hv_entry: 584 585 /* Required state: 586 * 587 * R4 = vcpu pointer (or NULL) 588 * MSR = ~IR|DR 589 * R13 = PACA 590 * R1 = host R1 591 * R2 = TOC 592 * all other volatile GPRS = free 593 * Does not preserve non-volatile GPRs or CR fields 594 */ 595 mflr r0 596 std r0, PPC_LR_STKOFF(r1) 597 stdu r1, -SFS(r1) 598 599 /* Save R1 in the PACA */ 600 std r1, HSTATE_HOST_R1(r13) 601 602 li r6, KVM_GUEST_MODE_HOST_HV 603 stb r6, HSTATE_IN_GUEST(r13) 604 605#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 606 /* Store initial timestamp */ 607 cmpdi r4, 0 608 beq 1f 609 addi r3, r4, VCPU_TB_RMENTRY 610 bl kvmhv_start_timing 6111: 612#endif 613 614 /* Use cr7 as an indication of radix mode */ 615 ld r5, HSTATE_KVM_VCORE(r13) 616 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ 617 lbz r0, KVM_RADIX(r9) 618 cmpwi cr7, r0, 0 619 620 /* 621 * POWER7/POWER8 host -> guest partition switch code. 622 * We don't have to lock against concurrent tlbies, 623 * but we do have to coordinate across hardware threads. 624 */ 625 /* Set bit in entry map iff exit map is zero. */ 626 li r7, 1 627 lbz r6, HSTATE_PTID(r13) 628 sld r7, r7, r6 629 addi r8, r5, VCORE_ENTRY_EXIT 63021: lwarx r3, 0, r8 631 cmpwi r3, 0x100 /* any threads starting to exit? */ 632 bge secondary_too_late /* if so we're too late to the party */ 633 or r3, r3, r7 634 stwcx. r3, 0, r8 635 bne 21b 636 637 /* Primary thread switches to guest partition. */ 638 cmpwi r6,0 639 bne 10f 640 641 /* Radix has already switched LPID and flushed core TLB */ 642 bne cr7, 22f 643 644 lwz r7,KVM_LPID(r9) 645BEGIN_FTR_SECTION 646 ld r6,KVM_SDR1(r9) 647 li r0,LPID_RSVD /* switch to reserved LPID */ 648 mtspr SPRN_LPID,r0 649 ptesync 650 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 651END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 652 mtspr SPRN_LPID,r7 653 isync 654 655 /* See if we need to flush the TLB. Hash has to be done in RM */ 656 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ 657BEGIN_FTR_SECTION 658 /* 659 * On POWER9, individual threads can come in here, but the 660 * TLB is shared between the 4 threads in a core, hence 661 * invalidating on one thread invalidates for all. 662 * Thus we make all 4 threads use the same bit here. 
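	 * (r6 below starts out as this cpu's paca index; the clrrdi rounds
	 * it down to the first thread of the core so that all four threads
	 * use the same bit.  The arithmetic that follows selects bit
	 * (r6 % 64) of doubleword (r6 / 64) of kvm->arch.need_tlb_flush,
	 * i.e. roughly, as an illustrative C sketch rather than the real
	 * source:
	 *	if (test_bit(cpu, kvm->arch.need_tlb_flush)) {
	 *		flush every TLB set with tlbiel;
	 *		clear_bit(cpu, kvm->arch.need_tlb_flush);
	 *	}
	 * done by hand so that it is safe in real mode.)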
663 */ 664 clrrdi r6,r6,2 665END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 666 clrldi r7,r6,64-6 /* extract bit number (6 bits) */ 667 srdi r6,r6,6 /* doubleword number */ 668 sldi r6,r6,3 /* address offset */ 669 add r6,r6,r9 670 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ 671 li r8,1 672 sld r8,r8,r7 673 ld r7,0(r6) 674 and. r7,r7,r8 675 beq 22f 676 /* Flush the TLB of any entries for this LPID */ 677 lwz r0,KVM_TLB_SETS(r9) 678 mtctr r0 679 li r7,0x800 /* IS field = 0b10 */ 680 ptesync 681 li r0,0 /* RS for P9 version of tlbiel */ 68228: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ 683 addi r7,r7,0x1000 684 bdnz 28b 685 ptesync 68623: ldarx r7,0,r6 /* clear the bit after TLB flushed */ 687 andc r7,r7,r8 688 stdcx. r7,0,r6 689 bne 23b 690 691 /* Add timebase offset onto timebase */ 69222: ld r8,VCORE_TB_OFFSET(r5) 693 cmpdi r8,0 694 beq 37f 695 std r8, VCORE_TB_OFFSET_APPL(r5) 696 mftb r6 /* current host timebase */ 697 add r8,r8,r6 698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 699 mftb r7 /* check if lower 24 bits overflowed */ 700 clrldi r6,r6,40 701 clrldi r7,r7,40 702 cmpld r7,r6 703 bge 37f 704 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 705 mtspr SPRN_TBU40,r8 706 707 /* Load guest PCR value to select appropriate compat mode */ 70837: ld r7, VCORE_PCR(r5) 709 cmpdi r7, 0 710 beq 38f 711 mtspr SPRN_PCR, r7 71238: 713 714BEGIN_FTR_SECTION 715 /* DPDES and VTB are shared between threads */ 716 ld r8, VCORE_DPDES(r5) 717 ld r7, VCORE_VTB(r5) 718 mtspr SPRN_DPDES, r8 719 mtspr SPRN_VTB, r7 720END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 721 722 /* Mark the subcore state as inside guest */ 723 bl kvmppc_subcore_enter_guest 724 nop 725 ld r5, HSTATE_KVM_VCORE(r13) 726 ld r4, HSTATE_KVM_VCPU(r13) 727 li r0,1 728 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 729 730 /* Do we have a guest vcpu to run? */ 73110: cmpdi r4, 0 732 beq kvmppc_primary_no_guest 733kvmppc_got_guest: 734 /* Increment yield count if they have a VPA */ 735 ld r3, VCPU_VPA(r4) 736 cmpdi r3, 0 737 beq 25f 738 li r6, LPPACA_YIELDCOUNT 739 LWZX_BE r5, r3, r6 740 addi r5, r5, 1 741 STWX_BE r5, r3, r6 742 li r6, 1 743 stb r6, VCPU_VPA_DIRTY(r4) 74425: 745 746 /* Save purr/spurr */ 747 mfspr r5,SPRN_PURR 748 mfspr r6,SPRN_SPURR 749 std r5,HSTATE_PURR(r13) 750 std r6,HSTATE_SPURR(r13) 751 ld r7,VCPU_PURR(r4) 752 ld r8,VCPU_SPURR(r4) 753 mtspr SPRN_PURR,r7 754 mtspr SPRN_SPURR,r8 755 756 /* Save host values of some registers */ 757BEGIN_FTR_SECTION 758 mfspr r5, SPRN_TIDR 759 mfspr r6, SPRN_PSSCR 760 mfspr r7, SPRN_PID 761 mfspr r8, SPRN_IAMR 762 std r5, STACK_SLOT_TID(r1) 763 std r6, STACK_SLOT_PSSCR(r1) 764 std r7, STACK_SLOT_PID(r1) 765 std r8, STACK_SLOT_IAMR(r1) 766 mfspr r5, SPRN_HFSCR 767 std r5, STACK_SLOT_HFSCR(r1) 768END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 769BEGIN_FTR_SECTION 770 mfspr r5, SPRN_CIABR 771 mfspr r6, SPRN_DAWR 772 mfspr r7, SPRN_DAWRX 773 std r5, STACK_SLOT_CIABR(r1) 774 std r6, STACK_SLOT_DAWR(r1) 775 std r7, STACK_SLOT_DAWRX(r1) 776END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 777 778BEGIN_FTR_SECTION 779 /* Set partition DABR */ 780 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 781 lwz r5,VCPU_DABRX(r4) 782 ld r6,VCPU_DABR(r4) 783 mtspr SPRN_DABRX,r5 784 mtspr SPRN_DABR,r6 785 isync 786END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 787 788#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 789/* 790 * Branch around the call if both CPU_FTR_TM and 791 * CPU_FTR_P9_TM_HV_ASSIST are off. 
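 * (END_FTR_SECTION(mask, 0) keeps the branch in place only when every
 * bit in mask is clear, which is how "both ... are off" is expressed
 * here; with either feature present the branch is patched out and the
 * TM restore call below runs.)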
792 */ 793BEGIN_FTR_SECTION 794 b 91f 795END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 796 /* 797 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR 798 */ 799 mr r3, r4 800 ld r4, VCPU_MSR(r3) 801 bl kvmppc_restore_tm_hv 802 ld r4, HSTATE_KVM_VCPU(r13) 80391: 804#endif 805 806 /* Load guest PMU registers */ 807 /* R4 is live here (vcpu pointer) */ 808 li r3, 1 809 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 810 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 811 isync 812BEGIN_FTR_SECTION 813 ld r3, VCPU_MMCR(r4) 814 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 815 cmpwi r5, MMCR0_PMAO 816 beql kvmppc_fix_pmao 817END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 818 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 819 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 820 lwz r6, VCPU_PMC + 8(r4) 821 lwz r7, VCPU_PMC + 12(r4) 822 lwz r8, VCPU_PMC + 16(r4) 823 lwz r9, VCPU_PMC + 20(r4) 824 mtspr SPRN_PMC1, r3 825 mtspr SPRN_PMC2, r5 826 mtspr SPRN_PMC3, r6 827 mtspr SPRN_PMC4, r7 828 mtspr SPRN_PMC5, r8 829 mtspr SPRN_PMC6, r9 830 ld r3, VCPU_MMCR(r4) 831 ld r5, VCPU_MMCR + 8(r4) 832 ld r6, VCPU_MMCR + 16(r4) 833 ld r7, VCPU_SIAR(r4) 834 ld r8, VCPU_SDAR(r4) 835 mtspr SPRN_MMCR1, r5 836 mtspr SPRN_MMCRA, r6 837 mtspr SPRN_SIAR, r7 838 mtspr SPRN_SDAR, r8 839BEGIN_FTR_SECTION 840 ld r5, VCPU_MMCR + 24(r4) 841 ld r6, VCPU_SIER(r4) 842 mtspr SPRN_MMCR2, r5 843 mtspr SPRN_SIER, r6 844BEGIN_FTR_SECTION_NESTED(96) 845 lwz r7, VCPU_PMC + 24(r4) 846 lwz r8, VCPU_PMC + 28(r4) 847 ld r9, VCPU_MMCR + 32(r4) 848 mtspr SPRN_SPMC1, r7 849 mtspr SPRN_SPMC2, r8 850 mtspr SPRN_MMCRS, r9 851END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 852END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 853 mtspr SPRN_MMCR0, r3 854 isync 855 856 /* Load up FP, VMX and VSX registers */ 857 bl kvmppc_load_fp 858 859 ld r14, VCPU_GPR(R14)(r4) 860 ld r15, VCPU_GPR(R15)(r4) 861 ld r16, VCPU_GPR(R16)(r4) 862 ld r17, VCPU_GPR(R17)(r4) 863 ld r18, VCPU_GPR(R18)(r4) 864 ld r19, VCPU_GPR(R19)(r4) 865 ld r20, VCPU_GPR(R20)(r4) 866 ld r21, VCPU_GPR(R21)(r4) 867 ld r22, VCPU_GPR(R22)(r4) 868 ld r23, VCPU_GPR(R23)(r4) 869 ld r24, VCPU_GPR(R24)(r4) 870 ld r25, VCPU_GPR(R25)(r4) 871 ld r26, VCPU_GPR(R26)(r4) 872 ld r27, VCPU_GPR(R27)(r4) 873 ld r28, VCPU_GPR(R28)(r4) 874 ld r29, VCPU_GPR(R29)(r4) 875 ld r30, VCPU_GPR(R30)(r4) 876 ld r31, VCPU_GPR(R31)(r4) 877 878 /* Switch DSCR to guest value */ 879 ld r5, VCPU_DSCR(r4) 880 mtspr SPRN_DSCR, r5 881 882BEGIN_FTR_SECTION 883 /* Skip next section on POWER7 */ 884 b 8f 885END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 886 /* Load up POWER8-specific registers */ 887 ld r5, VCPU_IAMR(r4) 888 lwz r6, VCPU_PSPB(r4) 889 ld r7, VCPU_FSCR(r4) 890 mtspr SPRN_IAMR, r5 891 mtspr SPRN_PSPB, r6 892 mtspr SPRN_FSCR, r7 893 ld r5, VCPU_DAWR(r4) 894 ld r6, VCPU_DAWRX(r4) 895 ld r7, VCPU_CIABR(r4) 896 ld r8, VCPU_TAR(r4) 897 /* 898 * Handle broken DAWR case by not writing it. This means we 899 * can still store the DAWR register for migration. 
900 */ 901BEGIN_FTR_SECTION 902 mtspr SPRN_DAWR, r5 903 mtspr SPRN_DAWRX, r6 904END_FTR_SECTION_IFSET(CPU_FTR_DAWR) 905 mtspr SPRN_CIABR, r7 906 mtspr SPRN_TAR, r8 907 ld r5, VCPU_IC(r4) 908 ld r8, VCPU_EBBHR(r4) 909 mtspr SPRN_IC, r5 910 mtspr SPRN_EBBHR, r8 911 ld r5, VCPU_EBBRR(r4) 912 ld r6, VCPU_BESCR(r4) 913 lwz r7, VCPU_GUEST_PID(r4) 914 ld r8, VCPU_WORT(r4) 915 mtspr SPRN_EBBRR, r5 916 mtspr SPRN_BESCR, r6 917 mtspr SPRN_PID, r7 918 mtspr SPRN_WORT, r8 919BEGIN_FTR_SECTION 920 PPC_INVALIDATE_ERAT 921END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) 922BEGIN_FTR_SECTION 923 /* POWER8-only registers */ 924 ld r5, VCPU_TCSCR(r4) 925 ld r6, VCPU_ACOP(r4) 926 ld r7, VCPU_CSIGR(r4) 927 ld r8, VCPU_TACR(r4) 928 mtspr SPRN_TCSCR, r5 929 mtspr SPRN_ACOP, r6 930 mtspr SPRN_CSIGR, r7 931 mtspr SPRN_TACR, r8 932 nop 933FTR_SECTION_ELSE 934 /* POWER9-only registers */ 935 ld r5, VCPU_TID(r4) 936 ld r6, VCPU_PSSCR(r4) 937 lbz r8, HSTATE_FAKE_SUSPEND(r13) 938 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */ 939 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG 940 ld r7, VCPU_HFSCR(r4) 941 mtspr SPRN_TIDR, r5 942 mtspr SPRN_PSSCR, r6 943 mtspr SPRN_HFSCR, r7 944ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 9458: 946 947 ld r5, VCPU_SPRG0(r4) 948 ld r6, VCPU_SPRG1(r4) 949 ld r7, VCPU_SPRG2(r4) 950 ld r8, VCPU_SPRG3(r4) 951 mtspr SPRN_SPRG0, r5 952 mtspr SPRN_SPRG1, r6 953 mtspr SPRN_SPRG2, r7 954 mtspr SPRN_SPRG3, r8 955 956 /* Load up DAR and DSISR */ 957 ld r5, VCPU_DAR(r4) 958 lwz r6, VCPU_DSISR(r4) 959 mtspr SPRN_DAR, r5 960 mtspr SPRN_DSISR, r6 961 962 /* Restore AMR and UAMOR, set AMOR to all 1s */ 963 ld r5,VCPU_AMR(r4) 964 ld r6,VCPU_UAMOR(r4) 965 li r7,-1 966 mtspr SPRN_AMR,r5 967 mtspr SPRN_UAMOR,r6 968 mtspr SPRN_AMOR,r7 969 970 /* Restore state of CTRL run bit; assume 1 on entry */ 971 lwz r5,VCPU_CTRL(r4) 972 andi. r5,r5,1 973 bne 4f 974 mfspr r6,SPRN_CTRLF 975 clrrdi r6,r6,1 976 mtspr SPRN_CTRLT,r6 9774: 978 /* Secondary threads wait for primary to have done partition switch */ 979 ld r5, HSTATE_KVM_VCORE(r13) 980 lbz r6, HSTATE_PTID(r13) 981 cmpwi r6, 0 982 beq 21f 983 lbz r0, VCORE_IN_GUEST(r5) 984 cmpwi r0, 0 985 bne 21f 986 HMT_LOW 98720: lwz r3, VCORE_ENTRY_EXIT(r5) 988 cmpwi r3, 0x100 989 bge no_switch_exit 990 lbz r0, VCORE_IN_GUEST(r5) 991 cmpwi r0, 0 992 beq 20b 993 HMT_MEDIUM 99421: 995 /* Set LPCR. */ 996 ld r8,VCORE_LPCR(r5) 997 mtspr SPRN_LPCR,r8 998 isync 999 1000 /* 1001 * Set the decrementer to the guest decrementer. 1002 */ 1003 ld r8,VCPU_DEC_EXPIRES(r4) 1004 /* r8 is a host timebase value here, convert to guest TB */ 1005 ld r5,HSTATE_KVM_VCORE(r13) 1006 ld r6,VCORE_TB_OFFSET_APPL(r5) 1007 add r8,r8,r6 1008 mftb r7 1009 subf r3,r7,r8 1010 mtspr SPRN_DEC,r3 1011 1012 /* Check if HDEC expires soon */ 1013 mfspr r3, SPRN_HDEC 1014 EXTEND_HDEC(r3) 1015 cmpdi r3, 512 /* 1 microsecond */ 1016 blt hdec_soon 1017 1018 /* For hash guest, clear out and reload the SLB */ 1019 ld r6, VCPU_KVM(r4) 1020 lbz r0, KVM_RADIX(r6) 1021 cmpwi r0, 0 1022 bne 9f 1023 li r6, 0 1024 slbmte r6, r6 1025 slbia 1026 ptesync 1027 1028 /* Load up guest SLB entries (N.B. 
	   slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.
r8, r8, LPCR_LD@h 1141 bne 15f 1142END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1143 extsw r0, r0 114415: cmpdi r0, 0 1145 li r0, BOOK3S_INTERRUPT_DECREMENTER 1146 bge 5f 1147 114812: mtspr SPRN_SRR0, r10 1149 mr r10,r0 1150 mtspr SPRN_SRR1, r11 1151 mr r9, r4 1152 bl kvmppc_msr_interrupt 11535: 1154BEGIN_FTR_SECTION 1155 b fast_guest_return 1156END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 1157 /* On POWER9, check for pending doorbell requests */ 1158 lbz r0, VCPU_DBELL_REQ(r4) 1159 cmpwi r0, 0 1160 beq fast_guest_return 1161 ld r5, HSTATE_KVM_VCORE(r13) 1162 /* Set DPDES register so the CPU will take a doorbell interrupt */ 1163 li r0, 1 1164 mtspr SPRN_DPDES, r0 1165 std r0, VCORE_DPDES(r5) 1166 /* Make sure other cpus see vcore->dpdes set before dbell req clear */ 1167 lwsync 1168 /* Clear the pending doorbell request */ 1169 li r0, 0 1170 stb r0, VCPU_DBELL_REQ(r4) 1171 1172/* 1173 * Required state: 1174 * R4 = vcpu 1175 * R10: value for HSRR0 1176 * R11: value for HSRR1 1177 * R13 = PACA 1178 */ 1179fast_guest_return: 1180 li r0,0 1181 stb r0,VCPU_CEDED(r4) /* cancel cede */ 1182 mtspr SPRN_HSRR0,r10 1183 mtspr SPRN_HSRR1,r11 1184 1185 /* Activate guest mode, so faults get handled by KVM */ 1186 li r9, KVM_GUEST_MODE_GUEST_HV 1187 stb r9, HSTATE_IN_GUEST(r13) 1188 1189#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1190 /* Accumulate timing */ 1191 addi r3, r4, VCPU_TB_GUEST 1192 bl kvmhv_accumulate_time 1193#endif 1194 1195 /* Enter guest */ 1196 1197BEGIN_FTR_SECTION 1198 ld r5, VCPU_CFAR(r4) 1199 mtspr SPRN_CFAR, r5 1200END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1201BEGIN_FTR_SECTION 1202 ld r0, VCPU_PPR(r4) 1203END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1204 1205 ld r5, VCPU_LR(r4) 1206 lwz r6, VCPU_CR(r4) 1207 mtlr r5 1208 mtcr r6 1209 1210 ld r1, VCPU_GPR(R1)(r4) 1211 ld r2, VCPU_GPR(R2)(r4) 1212 ld r3, VCPU_GPR(R3)(r4) 1213 ld r5, VCPU_GPR(R5)(r4) 1214 ld r6, VCPU_GPR(R6)(r4) 1215 ld r7, VCPU_GPR(R7)(r4) 1216 ld r8, VCPU_GPR(R8)(r4) 1217 ld r9, VCPU_GPR(R9)(r4) 1218 ld r10, VCPU_GPR(R10)(r4) 1219 ld r11, VCPU_GPR(R11)(r4) 1220 ld r12, VCPU_GPR(R12)(r4) 1221 ld r13, VCPU_GPR(R13)(r4) 1222 1223BEGIN_FTR_SECTION 1224 mtspr SPRN_PPR, r0 1225END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1226 1227/* Move canary into DSISR to check for later */ 1228BEGIN_FTR_SECTION 1229 li r0, 0x7fff 1230 mtspr SPRN_HDSISR, r0 1231END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1232 1233 ld r0, VCPU_GPR(R0)(r4) 1234 ld r4, VCPU_GPR(R4)(r4) 1235 HRFI_TO_GUEST 1236 b . 1237 1238secondary_too_late: 1239 li r12, 0 1240 stw r12, STACK_SLOT_TRAP(r1) 1241 cmpdi r4, 0 1242 beq 11f 1243 stw r12, VCPU_TRAP(r4) 1244#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1245 addi r3, r4, VCPU_TB_RMEXIT 1246 bl kvmhv_accumulate_time 1247#endif 124811: b kvmhv_switch_to_host 1249 1250no_switch_exit: 1251 HMT_MEDIUM 1252 li r12, 0 1253 b 12f 1254hdec_soon: 1255 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 125612: stw r12, VCPU_TRAP(r4) 1257 mr r9, r4 1258#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1259 addi r3, r4, VCPU_TB_RMEXIT 1260 bl kvmhv_accumulate_time 1261#endif 1262 b guest_bypass 1263 1264/****************************************************************************** 1265 * * 1266 * Exit code * 1267 * * 1268 *****************************************************************************/ 1269 1270/* 1271 * We come here from the first-level interrupt handlers. 
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
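	 * (The mtmsrd just below uses the L=1 form, which updates only
	 * MSR[EE] and MSR[RI], so RI is turned back on without touching
	 * the IR/DR translation bits while we are still in the guest MMU
	 * context.)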
1357 */ 1358 li r0, MSR_RI 1359 mtmsrd r0, 1 1360 1361#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1362 addi r3, r9, VCPU_TB_RMINTR 1363 mr r4, r9 1364 bl kvmhv_accumulate_time 1365 ld r5, VCPU_GPR(R5)(r9) 1366 ld r6, VCPU_GPR(R6)(r9) 1367 ld r7, VCPU_GPR(R7)(r9) 1368 ld r8, VCPU_GPR(R8)(r9) 1369#endif 1370 1371 /* Save HEIR (HV emulation assist reg) in emul_inst 1372 if this is an HEI (HV emulation interrupt, e40) */ 1373 li r3,KVM_INST_FETCH_FAILED 1374 stw r3,VCPU_LAST_INST(r9) 1375 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1376 bne 11f 1377 mfspr r3,SPRN_HEIR 137811: stw r3,VCPU_HEIR(r9) 1379 1380 /* these are volatile across C function calls */ 1381#ifdef CONFIG_RELOCATABLE 1382 ld r3, HSTATE_SCRATCH1(r13) 1383 mtctr r3 1384#else 1385 mfctr r3 1386#endif 1387 mfxer r4 1388 std r3, VCPU_CTR(r9) 1389 std r4, VCPU_XER(r9) 1390 1391#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1392 /* For softpatch interrupt, go off and do TM instruction emulation */ 1393 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 1394 beq kvmppc_tm_emul 1395#endif 1396 1397 /* If this is a page table miss then see if it's theirs or ours */ 1398 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1399 beq kvmppc_hdsi 1400 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1401 beq kvmppc_hisi 1402 1403 /* See if this is a leftover HDEC interrupt */ 1404 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1405 bne 2f 1406 mfspr r3,SPRN_HDEC 1407 EXTEND_HDEC(r3) 1408 cmpdi r3,0 1409 mr r4,r9 1410 bge fast_guest_return 14112: 1412 /* See if this is an hcall we can handle in real mode */ 1413 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1414 beq hcall_try_real_mode 1415 1416 /* Hypervisor doorbell - exit only if host IPI flag set */ 1417 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL 1418 bne 3f 1419BEGIN_FTR_SECTION 1420 PPC_MSGSYNC 1421 lwsync 1422END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1423 lbz r0, HSTATE_HOST_IPI(r13) 1424 cmpwi r0, 0 1425 beq 4f 1426 b guest_exit_cont 14273: 1428 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ 1429 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL 1430 bne 14f 1431 mfspr r3, SPRN_HFSCR 1432 std r3, VCPU_HFSCR(r9) 1433 b guest_exit_cont 143414: 1435 /* External interrupt ? */ 1436 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1437 bne+ guest_exit_cont 1438 1439 /* External interrupt, first check for host_ipi. If this is 1440 * set, we know the host wants us out so let's do it now 1441 */ 1442 bl kvmppc_read_intr 1443 1444 /* 1445 * Restore the active volatile registers after returning from 1446 * a C function. 1447 */ 1448 ld r9, HSTATE_KVM_VCPU(r13) 1449 li r12, BOOK3S_INTERRUPT_EXTERNAL 1450 1451 /* 1452 * kvmppc_read_intr return codes: 1453 * 1454 * Exit to host (r3 > 0) 1455 * 1 An interrupt is pending that needs to be handled by the host 1456 * Exit guest and return to host by branching to guest_exit_cont 1457 * 1458 * 2 Passthrough that needs completion in the host 1459 * Exit guest and return to host by branching to guest_exit_cont 1460 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD 1461 * to indicate to the host to complete handling the interrupt 1462 * 1463 * Before returning to guest, we check if any CPU is heading out 1464 * to the host and if so, we head out also. If no CPUs are heading 1465 * check return values <= 0. 1466 * 1467 * Return to guest (r3 <= 0) 1468 * 0 No external interrupt is pending 1469 * -1 A guest wakeup IPI (which has now been cleared) 1470 * In either case, we return to guest to deliver any pending 1471 * guest interrupts. 
1472 * 1473 * -2 A PCI passthrough external interrupt was handled 1474 * (interrupt was delivered directly to guest) 1475 * Return to guest to deliver any pending guest interrupts. 1476 */ 1477 1478 cmpdi r3, 1 1479 ble 1f 1480 1481 /* Return code = 2 */ 1482 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1483 stw r12, VCPU_TRAP(r9) 1484 b guest_exit_cont 1485 14861: /* Return code <= 1 */ 1487 cmpdi r3, 0 1488 bgt guest_exit_cont 1489 1490 /* Return code <= 0 */ 14914: ld r5, HSTATE_KVM_VCORE(r13) 1492 lwz r0, VCORE_ENTRY_EXIT(r5) 1493 cmpwi r0, 0x100 1494 mr r4, r9 1495 blt deliver_guest_interrupt 1496 1497guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1498 /* Save more register state */ 1499 mfdar r6 1500 mfdsisr r7 1501 std r6, VCPU_DAR(r9) 1502 stw r7, VCPU_DSISR(r9) 1503 /* don't overwrite fault_dar/fault_dsisr if HDSI */ 1504 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 1505 beq mc_cont 1506 std r6, VCPU_FAULT_DAR(r9) 1507 stw r7, VCPU_FAULT_DSISR(r9) 1508 1509 /* See if it is a machine check */ 1510 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1511 beq machine_check_realmode 1512mc_cont: 1513#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1514 addi r3, r9, VCPU_TB_RMEXIT 1515 mr r4, r9 1516 bl kvmhv_accumulate_time 1517#endif 1518#ifdef CONFIG_KVM_XICS 1519 /* We are exiting, pull the VP from the XIVE */ 1520 lbz r0, VCPU_XIVE_PUSHED(r9) 1521 cmpwi cr0, r0, 0 1522 beq 1f 1523 li r7, TM_SPC_PULL_OS_CTX 1524 li r6, TM_QW1_OS 1525 mfmsr r0 1526 andi. r0, r0, MSR_DR /* in real mode? */ 1527 beq 2f 1528 ld r10, HSTATE_XIVE_TIMA_VIRT(r13) 1529 cmpldi cr0, r10, 0 1530 beq 1f 1531 /* First load to pull the context, we ignore the value */ 1532 eieio 1533 lwzx r11, r7, r10 1534 /* Second load to recover the context state (Words 0 and 1) */ 1535 ldx r11, r6, r10 1536 b 3f 15372: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) 1538 cmpldi cr0, r10, 0 1539 beq 1f 1540 /* First load to pull the context, we ignore the value */ 1541 eieio 1542 lwzcix r11, r7, r10 1543 /* Second load to recover the context state (Words 0 and 1) */ 1544 ldcix r11, r6, r10 15453: std r11, VCPU_XIVE_SAVED_STATE(r9) 1546 /* Fixup some of the state for the next load */ 1547 li r10, 0 1548 li r0, 0xff 1549 stb r10, VCPU_XIVE_PUSHED(r9) 1550 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1551 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1552 eieio 15531: 1554#endif /* CONFIG_KVM_XICS */ 1555 1556 /* For hash guest, read the guest SLB and save it away */ 1557 ld r5, VCPU_KVM(r9) 1558 lbz r0, KVM_RADIX(r5) 1559 li r5, 0 1560 cmpwi r0, 0 1561 bne 3f /* for radix, save 0 entries */ 1562 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1563 mtctr r0 1564 li r6,0 1565 addi r7,r9,VCPU_SLB 15661: slbmfee r8,r6 1567 andis. r0,r8,SLB_ESID_V@h 1568 beq 2f 1569 add r8,r8,r6 /* put index in */ 1570 slbmfev r3,r6 1571 std r8,VCPU_SLB_E(r7) 1572 std r3,VCPU_SLB_V(r7) 1573 addi r7,r7,VCPU_SLB_SIZE 1574 addi r5,r5,1 15752: addi r6,r6,1 1576 bdnz 1b 1577 /* Finally clear out the SLB */ 1578 li r0,0 1579 slbmte r0,r0 1580 slbia 1581 ptesync 15823: stw r5,VCPU_SLB_MAX(r9) 1583 1584 /* load host SLB entries */ 1585BEGIN_MMU_FTR_SECTION 1586 b 0f 1587END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 1588 ld r8,PACA_SLBSHADOWPTR(r13) 1589 1590 .rept SLB_NUM_BOLTED 1591 li r3, SLBSHADOW_SAVEAREA 1592 LDX_BE r5, r8, r3 1593 addi r3, r3, 8 1594 LDX_BE r6, r8, r3 1595 andis. 
r7,r5,SLB_ESID_V@h 1596 beq 1f 1597 slbmte r6,r5 15981: addi r8,r8,16 1599 .endr 16000: 1601 1602guest_bypass: 1603 stw r12, STACK_SLOT_TRAP(r1) 1604 1605 /* Save DEC */ 1606 /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1607 ld r3, HSTATE_KVM_VCORE(r13) 1608 mfspr r5,SPRN_DEC 1609 mftb r6 1610 /* On P9, if the guest has large decr enabled, don't sign extend */ 1611BEGIN_FTR_SECTION 1612 ld r4, VCORE_LPCR(r3) 1613 andis. r4, r4, LPCR_LD@h 1614 bne 16f 1615END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1616 extsw r5,r5 161716: add r5,r5,r6 1618 /* r5 is a guest timebase value here, convert to host TB */ 1619 ld r4,VCORE_TB_OFFSET_APPL(r3) 1620 subf r5,r4,r5 1621 std r5,VCPU_DEC_EXPIRES(r9) 1622 1623 /* Increment exit count, poke other threads to exit */ 1624 mr r3, r12 1625 bl kvmhv_commence_exit 1626 nop 1627 ld r9, HSTATE_KVM_VCPU(r13) 1628 1629 /* Stop others sending VCPU interrupts to this physical CPU */ 1630 li r0, -1 1631 stw r0, VCPU_CPU(r9) 1632 stw r0, VCPU_THREAD_CPU(r9) 1633 1634 /* Save guest CTRL register, set runlatch to 1 */ 1635 mfspr r6,SPRN_CTRLF 1636 stw r6,VCPU_CTRL(r9) 1637 andi. r0,r6,1 1638 bne 4f 1639 ori r6,r6,1 1640 mtspr SPRN_CTRLT,r6 16414: 1642 /* 1643 * Save the guest PURR/SPURR 1644 */ 1645 mfspr r5,SPRN_PURR 1646 mfspr r6,SPRN_SPURR 1647 ld r7,VCPU_PURR(r9) 1648 ld r8,VCPU_SPURR(r9) 1649 std r5,VCPU_PURR(r9) 1650 std r6,VCPU_SPURR(r9) 1651 subf r5,r7,r5 1652 subf r6,r8,r6 1653 1654 /* 1655 * Restore host PURR/SPURR and add guest times 1656 * so that the time in the guest gets accounted. 1657 */ 1658 ld r3,HSTATE_PURR(r13) 1659 ld r4,HSTATE_SPURR(r13) 1660 add r3,r3,r5 1661 add r4,r4,r6 1662 mtspr SPRN_PURR,r3 1663 mtspr SPRN_SPURR,r4 1664 1665BEGIN_FTR_SECTION 1666 b 8f 1667END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1668 /* Save POWER8-specific registers */ 1669 mfspr r5, SPRN_IAMR 1670 mfspr r6, SPRN_PSPB 1671 mfspr r7, SPRN_FSCR 1672 std r5, VCPU_IAMR(r9) 1673 stw r6, VCPU_PSPB(r9) 1674 std r7, VCPU_FSCR(r9) 1675 mfspr r5, SPRN_IC 1676 mfspr r7, SPRN_TAR 1677 std r5, VCPU_IC(r9) 1678 std r7, VCPU_TAR(r9) 1679 mfspr r8, SPRN_EBBHR 1680 std r8, VCPU_EBBHR(r9) 1681 mfspr r5, SPRN_EBBRR 1682 mfspr r6, SPRN_BESCR 1683 mfspr r7, SPRN_PID 1684 mfspr r8, SPRN_WORT 1685 std r5, VCPU_EBBRR(r9) 1686 std r6, VCPU_BESCR(r9) 1687 stw r7, VCPU_GUEST_PID(r9) 1688 std r8, VCPU_WORT(r9) 1689BEGIN_FTR_SECTION 1690 mfspr r5, SPRN_TCSCR 1691 mfspr r6, SPRN_ACOP 1692 mfspr r7, SPRN_CSIGR 1693 mfspr r8, SPRN_TACR 1694 std r5, VCPU_TCSCR(r9) 1695 std r6, VCPU_ACOP(r9) 1696 std r7, VCPU_CSIGR(r9) 1697 std r8, VCPU_TACR(r9) 1698FTR_SECTION_ELSE 1699 mfspr r5, SPRN_TIDR 1700 mfspr r6, SPRN_PSSCR 1701 std r5, VCPU_TID(r9) 1702 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */ 1703 rotldi r6, r6, 60 1704 std r6, VCPU_PSSCR(r9) 1705 /* Restore host HFSCR value */ 1706 ld r7, STACK_SLOT_HFSCR(r1) 1707 mtspr SPRN_HFSCR, r7 1708ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 1709 /* 1710 * Restore various registers to 0, where non-zero values 1711 * set by the guest could disrupt the host. 
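	 * (On POWER9 the host TIDR/PSSCR/PID/IAMR values are restored from
	 * their stack slots further down, which is why the IAMR/TCSCR/MMCRS
	 * clean-up below is confined to the pre-ARCH_300 feature section;
	 * only PSPB and WORT are cleared in both cases.)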
1712 */ 1713 li r0, 0 1714 mtspr SPRN_PSPB, r0 1715 mtspr SPRN_WORT, r0 1716BEGIN_FTR_SECTION 1717 mtspr SPRN_IAMR, r0 1718 mtspr SPRN_TCSCR, r0 1719 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1720 li r0, 1 1721 sldi r0, r0, 31 1722 mtspr SPRN_MMCRS, r0 1723END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 17248: 1725 1726 /* Save and reset AMR and UAMOR before turning on the MMU */ 1727 mfspr r5,SPRN_AMR 1728 mfspr r6,SPRN_UAMOR 1729 std r5,VCPU_AMR(r9) 1730 std r6,VCPU_UAMOR(r9) 1731 li r6,0 1732 mtspr SPRN_AMR,r6 1733 mtspr SPRN_UAMOR, r6 1734 1735 /* Switch DSCR back to host value */ 1736 mfspr r8, SPRN_DSCR 1737 ld r7, HSTATE_DSCR(r13) 1738 std r8, VCPU_DSCR(r9) 1739 mtspr SPRN_DSCR, r7 1740 1741 /* Save non-volatile GPRs */ 1742 std r14, VCPU_GPR(R14)(r9) 1743 std r15, VCPU_GPR(R15)(r9) 1744 std r16, VCPU_GPR(R16)(r9) 1745 std r17, VCPU_GPR(R17)(r9) 1746 std r18, VCPU_GPR(R18)(r9) 1747 std r19, VCPU_GPR(R19)(r9) 1748 std r20, VCPU_GPR(R20)(r9) 1749 std r21, VCPU_GPR(R21)(r9) 1750 std r22, VCPU_GPR(R22)(r9) 1751 std r23, VCPU_GPR(R23)(r9) 1752 std r24, VCPU_GPR(R24)(r9) 1753 std r25, VCPU_GPR(R25)(r9) 1754 std r26, VCPU_GPR(R26)(r9) 1755 std r27, VCPU_GPR(R27)(r9) 1756 std r28, VCPU_GPR(R28)(r9) 1757 std r29, VCPU_GPR(R29)(r9) 1758 std r30, VCPU_GPR(R30)(r9) 1759 std r31, VCPU_GPR(R31)(r9) 1760 1761 /* Save SPRGs */ 1762 mfspr r3, SPRN_SPRG0 1763 mfspr r4, SPRN_SPRG1 1764 mfspr r5, SPRN_SPRG2 1765 mfspr r6, SPRN_SPRG3 1766 std r3, VCPU_SPRG0(r9) 1767 std r4, VCPU_SPRG1(r9) 1768 std r5, VCPU_SPRG2(r9) 1769 std r6, VCPU_SPRG3(r9) 1770 1771 /* save FP state */ 1772 mr r3, r9 1773 bl kvmppc_save_fp 1774 1775#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1776/* 1777 * Branch around the call if both CPU_FTR_TM and 1778 * CPU_FTR_P9_TM_HV_ASSIST are off. 1779 */ 1780BEGIN_FTR_SECTION 1781 b 91f 1782END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 1783 /* 1784 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR 1785 */ 1786 mr r3, r9 1787 ld r4, VCPU_MSR(r3) 1788 bl kvmppc_save_tm_hv 1789 ld r9, HSTATE_KVM_VCPU(r13) 179091: 1791#endif 1792 1793 /* Increment yield count if they have a VPA */ 1794 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1795 cmpdi r8, 0 1796 beq 25f 1797 li r4, LPPACA_YIELDCOUNT 1798 LWZX_BE r3, r8, r4 1799 addi r3, r3, 1 1800 STWX_BE r3, r8, r4 1801 li r3, 1 1802 stb r3, VCPU_VPA_DIRTY(r9) 180325: 1804 /* Save PMU registers if requested */ 1805 /* r8 and cr0.eq are live here */ 1806BEGIN_FTR_SECTION 1807 /* 1808 * POWER8 seems to have a hardware bug where setting 1809 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] 1810 * when some counters are already negative doesn't seem 1811 * to cause a performance monitor alert (and hence interrupt). 1812 * The effect of this is that when saving the PMU state, 1813 * if there is no PMU alert pending when we read MMCR0 1814 * before freezing the counters, but one becomes pending 1815 * before we read the counters, we lose it. 1816 * To work around this, we need a way to freeze the counters 1817 * before reading MMCR0. Normally, freezing the counters 1818 * is done by writing MMCR0 (to set MMCR0[FC]) which 1819 * unavoidably writes MMCR0[PMA0] as well. On POWER8, 1820 * we can also freeze the counters using MMCR2, by writing 1821 * 1s to all the counter freeze condition bits (there are 1822 * 9 bits each for 6 counters). 
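	 * (That is what the li/clrrdi pair below builds: all-ones with the
	 * low 10 bits cleared, i.e. the 54 freeze-condition bits (9 for each
	 * of the 6 counters) set; the previous MMCR2 value is kept in r10
	 * and later stored as the guest's MMCR2.)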
1823 */ 1824 li r3, -1 /* set all freeze bits */ 1825 clrrdi r3, r3, 10 1826 mfspr r10, SPRN_MMCR2 1827 mtspr SPRN_MMCR2, r3 1828 isync 1829END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1830 li r3, 1 1831 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1832 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1833 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1834 mfspr r6, SPRN_MMCRA 1835 /* Clear MMCRA in order to disable SDAR updates */ 1836 li r7, 0 1837 mtspr SPRN_MMCRA, r7 1838 isync 1839 beq 21f /* if no VPA, save PMU stuff anyway */ 1840 lbz r7, LPPACA_PMCINUSE(r8) 1841 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ 1842 bne 21f 1843 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1844 b 22f 184521: mfspr r5, SPRN_MMCR1 1846 mfspr r7, SPRN_SIAR 1847 mfspr r8, SPRN_SDAR 1848 std r4, VCPU_MMCR(r9) 1849 std r5, VCPU_MMCR + 8(r9) 1850 std r6, VCPU_MMCR + 16(r9) 1851BEGIN_FTR_SECTION 1852 std r10, VCPU_MMCR + 24(r9) 1853END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1854 std r7, VCPU_SIAR(r9) 1855 std r8, VCPU_SDAR(r9) 1856 mfspr r3, SPRN_PMC1 1857 mfspr r4, SPRN_PMC2 1858 mfspr r5, SPRN_PMC3 1859 mfspr r6, SPRN_PMC4 1860 mfspr r7, SPRN_PMC5 1861 mfspr r8, SPRN_PMC6 1862 stw r3, VCPU_PMC(r9) 1863 stw r4, VCPU_PMC + 4(r9) 1864 stw r5, VCPU_PMC + 8(r9) 1865 stw r6, VCPU_PMC + 12(r9) 1866 stw r7, VCPU_PMC + 16(r9) 1867 stw r8, VCPU_PMC + 20(r9) 1868BEGIN_FTR_SECTION 1869 mfspr r5, SPRN_SIER 1870 std r5, VCPU_SIER(r9) 1871BEGIN_FTR_SECTION_NESTED(96) 1872 mfspr r6, SPRN_SPMC1 1873 mfspr r7, SPRN_SPMC2 1874 mfspr r8, SPRN_MMCRS 1875 stw r6, VCPU_PMC + 24(r9) 1876 stw r7, VCPU_PMC + 28(r9) 1877 std r8, VCPU_MMCR + 32(r9) 1878 lis r4, 0x8000 1879 mtspr SPRN_MMCRS, r4 1880END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 1881END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 188222: 1883 1884 /* Restore host values of some registers */ 1885BEGIN_FTR_SECTION 1886 ld r5, STACK_SLOT_CIABR(r1) 1887 ld r6, STACK_SLOT_DAWR(r1) 1888 ld r7, STACK_SLOT_DAWRX(r1) 1889 mtspr SPRN_CIABR, r5 1890 /* 1891 * If the DAWR doesn't work, it's ok to write these here as 1892 * this value should always be zero 1893 */ 1894 mtspr SPRN_DAWR, r6 1895 mtspr SPRN_DAWRX, r7 1896END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1897BEGIN_FTR_SECTION 1898 ld r5, STACK_SLOT_TID(r1) 1899 ld r6, STACK_SLOT_PSSCR(r1) 1900 ld r7, STACK_SLOT_PID(r1) 1901 ld r8, STACK_SLOT_IAMR(r1) 1902 mtspr SPRN_TIDR, r5 1903 mtspr SPRN_PSSCR, r6 1904 mtspr SPRN_PID, r7 1905 mtspr SPRN_IAMR, r8 1906END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1907 1908#ifdef CONFIG_PPC_RADIX_MMU 1909 /* 1910 * Are we running hash or radix ? 1911 */ 1912 ld r5, VCPU_KVM(r9) 1913 lbz r0, KVM_RADIX(r5) 1914 cmpwi cr2, r0, 0 1915 beq cr2, 4f 1916 1917 /* 1918 * Radix: do eieio; tlbsync; ptesync sequence in case we 1919 * interrupted the guest between a tlbie and a ptesync. 1920 */ 1921 eieio 1922 tlbsync 1923 ptesync 1924 1925 /* Radix: Handle the case where the guest used an illegal PID */ 1926 LOAD_REG_ADDR(r4, mmu_base_pid) 1927 lwz r3, VCPU_GUEST_PID(r9) 1928 lwz r5, 0(r4) 1929 cmpw cr0,r3,r5 1930 blt 2f 1931 1932 /* 1933 * Illegal PID, the HW might have prefetched and cached in the TLB 1934 * some translations for the LPID 0 / guest PID combination which 1935 * Linux doesn't know about, so we need to flush that PID out of 1936 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to 1937 * the right context. 
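	 * Roughly, as an illustrative C sketch (not the actual source):
	 *	if (vcpu->arch.pid >= mmu_base_pid) {
	 *		mtspr(SPRN_LPID, 0);
	 *		for (set = 0; set < kvm->arch.tlb_sets; set++)
	 *			tlbiel(set, vcpu->arch.pid, RIC=2, PRS=1, R=1);
	 *	}
	 * mmu_base_pid is the lowest PID the host allocates from, so a
	 * guest PID at or above it could alias host translations under
	 * LPID 0.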
1938 */ 1939 li r0,0 1940 mtspr SPRN_LPID,r0 1941 isync 1942 1943 /* Then do a congruence class local flush */ 1944 ld r6,VCPU_KVM(r9) 1945 lwz r0,KVM_TLB_SETS(r6) 1946 mtctr r0 1947 li r7,0x400 /* IS field = 0b01 */ 1948 ptesync 1949 sldi r0,r3,32 /* RS has PID */ 19501: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ 1951 addi r7,r7,0x1000 1952 bdnz 1b 1953 ptesync 1954 19552: /* Flush the ERAT on radix P9 DD1 guest exit */ 1956BEGIN_FTR_SECTION 1957 PPC_INVALIDATE_ERAT 1958END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) 19594: 1960#endif /* CONFIG_PPC_RADIX_MMU */ 1961 1962 /* 1963 * POWER7/POWER8 guest -> host partition switch code. 1964 * We don't have to lock against tlbies but we do 1965 * have to coordinate the hardware threads. 1966 * Here STACK_SLOT_TRAP(r1) contains the trap number. 1967 */ 1968kvmhv_switch_to_host: 1969 /* Secondary threads wait for primary to do partition switch */ 1970 ld r5,HSTATE_KVM_VCORE(r13) 1971 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1972 lbz r3,HSTATE_PTID(r13) 1973 cmpwi r3,0 1974 beq 15f 1975 HMT_LOW 197613: lbz r3,VCORE_IN_GUEST(r5) 1977 cmpwi r3,0 1978 bne 13b 1979 HMT_MEDIUM 1980 b 16f 1981 1982 /* Primary thread waits for all the secondaries to exit guest */ 198315: lwz r3,VCORE_ENTRY_EXIT(r5) 1984 rlwinm r0,r3,32-8,0xff 1985 clrldi r3,r3,56 1986 cmpw r3,r0 1987 bne 15b 1988 isync 1989 1990 /* Did we actually switch to the guest at all? */ 1991 lbz r6, VCORE_IN_GUEST(r5) 1992 cmpwi r6, 0 1993 beq 19f 1994 1995 /* Primary thread switches back to host partition */ 1996 lwz r7,KVM_HOST_LPID(r4) 1997BEGIN_FTR_SECTION 1998 ld r6,KVM_HOST_SDR1(r4) 1999 li r8,LPID_RSVD /* switch to reserved LPID */ 2000 mtspr SPRN_LPID,r8 2001 ptesync 2002 mtspr SPRN_SDR1,r6 /* switch to host page table */ 2003END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 2004 mtspr SPRN_LPID,r7 2005 isync 2006 2007BEGIN_FTR_SECTION 2008 /* DPDES and VTB are shared between threads */ 2009 mfspr r7, SPRN_DPDES 2010 mfspr r8, SPRN_VTB 2011 std r7, VCORE_DPDES(r5) 2012 std r8, VCORE_VTB(r5) 2013 /* clear DPDES so we don't get guest doorbells in the host */ 2014 li r8, 0 2015 mtspr SPRN_DPDES, r8 2016END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2017 2018 /* If HMI, call kvmppc_realmode_hmi_handler() */ 2019 lwz r12, STACK_SLOT_TRAP(r1) 2020 cmpwi r12, BOOK3S_INTERRUPT_HMI 2021 bne 27f 2022 bl kvmppc_realmode_hmi_handler 2023 nop 2024 cmpdi r3, 0 2025 /* 2026 * At this point kvmppc_realmode_hmi_handler may have resync-ed 2027 * the TB, and if it has, we must not subtract the guest timebase 2028 * offset from the timebase. So, skip it. 2029 * 2030 * Also, do not call kvmppc_subcore_exit_guest() because it has 2031 * been invoked as part of kvmppc_realmode_hmi_handler(). 
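	 * (Hence the beq below: a zero return from
	 * kvmppc_realmode_hmi_handler jumps straight to label 30,
	 * skipping both the timebase-offset subtraction and the
	 * kvmppc_subcore_exit_guest call.)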
2032 */ 2033 beq 30f 2034 203527: 2036 /* Subtract timebase offset from timebase */ 2037 ld r8, VCORE_TB_OFFSET_APPL(r5) 2038 cmpdi r8,0 2039 beq 17f 2040 li r0, 0 2041 std r0, VCORE_TB_OFFSET_APPL(r5) 2042 mftb r6 /* current guest timebase */ 2043 subf r8,r8,r6 2044 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 2045 mftb r7 /* check if lower 24 bits overflowed */ 2046 clrldi r6,r6,40 2047 clrldi r7,r7,40 2048 cmpld r7,r6 2049 bge 17f 2050 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 2051 mtspr SPRN_TBU40,r8 2052 205317: bl kvmppc_subcore_exit_guest 2054 nop 205530: ld r5,HSTATE_KVM_VCORE(r13) 2056 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 2057 2058 /* Reset PCR */ 2059 ld r0, VCORE_PCR(r5) 2060 cmpdi r0, 0 2061 beq 18f 2062 li r0, 0 2063 mtspr SPRN_PCR, r0 206418: 2065 /* Signal secondary CPUs to continue */ 2066 stb r0,VCORE_IN_GUEST(r5) 206719: lis r8,0x7fff /* MAX_INT@h */ 2068 mtspr SPRN_HDEC,r8 2069 207016: 2071BEGIN_FTR_SECTION 2072 /* On POWER9 with HPT-on-radix we need to wait for all other threads */ 2073 ld r3, HSTATE_SPLIT_MODE(r13) 2074 cmpdi r3, 0 2075 beq 47f 2076 lwz r8, KVM_SPLIT_DO_RESTORE(r3) 2077 cmpwi r8, 0 2078 beq 47f 2079 bl kvmhv_p9_restore_lpcr 2080 nop 2081 b 48f 208247: 2083END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 2084 ld r8,KVM_HOST_LPCR(r4) 2085 mtspr SPRN_LPCR,r8 2086 isync 208748: 2088#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2089 /* Finish timing, if we have a vcpu */ 2090 ld r4, HSTATE_KVM_VCPU(r13) 2091 cmpdi r4, 0 2092 li r3, 0 2093 beq 2f 2094 bl kvmhv_accumulate_time 20952: 2096#endif 2097 /* Unset guest mode */ 2098 li r0, KVM_GUEST_MODE_NONE 2099 stb r0, HSTATE_IN_GUEST(r13) 2100 2101 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ 2102 ld r0, SFS+PPC_LR_STKOFF(r1) 2103 addi r1, r1, SFS 2104 mtlr r0 2105 blr 2106 2107#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2108/* 2109 * Softpatch interrupt for transactional memory emulation cases 2110 * on POWER9 DD2.2. This is early in the guest exit path - we 2111 * haven't saved registers or done a treclaim yet. 2112 */ 2113kvmppc_tm_emul: 2114 /* Save instruction image in HEIR */ 2115 mfspr r3, SPRN_HEIR 2116 stw r3, VCPU_HEIR(r9) 2117 2118 /* 2119 * The cases we want to handle here are those where the guest 2120 * is in real suspend mode and is trying to transition to 2121 * transactional mode. 2122 */ 2123 lbz r0, HSTATE_FAKE_SUSPEND(r13) 2124 cmpwi r0, 0 /* keep exiting guest if in fake suspend */ 2125 bne guest_exit_cont 2126 rldicl r3, r11, 64 - MSR_TS_S_LG, 62 2127 cmpwi r3, 1 /* or if not in suspend state */ 2128 bne guest_exit_cont 2129 2130 /* Call C code to do the emulation */ 2131 mr r3, r9 2132 bl kvmhv_p9_tm_emulation_early 2133 nop 2134 ld r9, HSTATE_KVM_VCPU(r13) 2135 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH 2136 cmpwi r3, 0 2137 beq guest_exit_cont /* continue exiting if not handled */ 2138 ld r10, VCPU_PC(r9) 2139 ld r11, VCPU_MSR(r9) 2140 b fast_interrupt_c_return /* go back to guest if handled */ 2141#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 2142 2143/* 2144 * Check whether an HDSI is an HPTE not found fault or something else. 2145 * If it is an HPTE not found fault that is due to the guest accessing 2146 * a page that they have mapped but which we have paged out, then 2147 * we continue on with the guest exit path. In all other cases, 2148 * reflect the HDSI to the guest as a DSI. 2149 */ 2150kvmppc_hdsi: 2151 ld r3, VCPU_KVM(r9) 2152 lbz r0, KVM_RADIX(r3) 2153 mfspr r4, SPRN_HDAR 2154 mfspr r6, SPRN_HDSISR 2155BEGIN_FTR_SECTION 2156 /* Look for DSISR canary. 
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

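	/*
	 * A note on the MMIO path above: the guest instruction is fetched
	 * through the guest MMU by temporarily turning MSR[DR] back on
	 * while HSTATE_IN_GUEST is set to KVM_GUEST_MODE_SKIP, so that if
	 * the lwz itself faults, the interrupt entry code simply skips
	 * over it and we are left with KVM_INST_FETCH_FAILED in the
	 * result register instead of taking a nested fault.
	 */
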
.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
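/*
 * Roughly, in C (an illustrative sketch only, not part of the build;
 * exit_to_host()/reenter_guest() are placeholders, the rest mirror the
 * names used below):
 *
 *	unsigned long nr = kvmppc_get_gpr(vcpu, 3) & ~3UL;   // hcall number
 *	if (nr >= hcall_real_table_end - hcall_real_table)
 *		return exit_to_host();
 *	// one enable bit per hcall, i.e. per table slot (nr / 4)
 *	if (!test_bit(nr / 4, kvm->arch.enabled_hcalls))
 *		return exit_to_host();
 *	s32 off = hcall_real_table[nr / 4];	// 32-bit offset, 0 = none
 *	if (!off)
 *		return exit_to_host();
 *	handler = (void *)hcall_real_table + off;
 *	ret = handler(vcpu, ...);
 *	if (ret == H_TOO_HARD)
 *		return exit_to_host();
 *	kvmppc_set_gpr(vcpu, 3, ret);
 *	return reenter_guest();
 */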
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

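/*
 * Each entry below corresponds to hcall number 4 * index (H_REMOVE = 0x04,
 * H_ENTER = 0x08, ...).  An entry is the 32-bit offset of the real-mode
 * handler from hcall_real_table, or 0 if that hcall has no real-mode
 * handler and must be passed up to the kernel or userspace.
 */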
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X*/
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

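/*
 * kvmppc_h_set_xdabr below validates the caller-supplied DABRX value and
 * falls through to the common code at local label 3; kvmppc_h_set_dabr
 * supplies the default DABRX_USER | DABRX_KERNEL.  On POWER8 and later
 * there is no DABR, so the request is emulated by translating the
 * DABR/DABRX bits into an equivalent DAWR/DAWRX pair for compatibility
 * mode guests (and POWER9 with DAWR disabled reports H_HARDWARE).
 */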
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
BEGIN_FTR_SECTION
	/* POWER9 with disabled DAWR */
	li	r3, H_HARDWARE
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

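	/*
	 * Roughly, in C (an illustrative sketch only, not part of the
	 * build), the lwarx/stwcx. loop above does:
	 *
	 *	u32 entry_map = vcore->entry_exit_map & 0xff; // threads in guest
	 *	u32 old, new;
	 *	do {
	 *		old = vcore->napping_threads;
	 *		new = old | (1u << ptid);
	 *		if (new == entry_map)	// everyone else is napping too
	 *			return kvm_cede_exit();
	 *	} while (cmpxchg(&vcore->napping_threads, old, new) != old);
	 *
	 * followed by a re-check of entry_exit_map for a thread that has
	 * already started exiting.
	 */
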
/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_save_tm_hv
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

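	/*
	 * Roughly, in C (an illustrative sketch only, not part of the
	 * build; sign_extend helpers are placeholders):
	 *
	 *	if (!large_decrementer)
	 *		dec = (s32)dec;		// sign-extend 32-bit DEC
	 *	hdec = sign_extend_hdec(hdec);
	 *	if (dec > hdec)
	 *		mtspr(SPRN_DEC, hdec);	// wake by end of timeslice
	 *	vcpu->arch.dec_expires =
	 *		dec + guest_tb - vcore->tb_offset_applied;
	 */
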
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

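	/*
	 * Roughly, in C (an illustrative sketch only, not part of the
	 * build): the expiry saved in host-timebase units before napping
	 * is converted back to the guest timebase and turned into a
	 * remaining-ticks value for the decrementer:
	 *
	 *	guest_expiry = vcpu->arch.dec_expires +
	 *		       vcore->tb_offset_applied;
	 *	mtspr(SPRN_DEC, guest_expiry - mftb());
	 */
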
	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *	r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* Abort if we still have a pending escalation */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	1f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
1:	/* Enable XIVE escalation */
	li	r5, XIVE_ESB_SET_PQ_00
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldx	r0, r10, r5
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldcix	r0, r10, r5
2:	sync
	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont

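	/*
	 * A note on the XIVE code above: re-arming the escalation is done
	 * by loading from the interrupt's event state buffer (ESB)
	 * management page at the XIVE_ESB_SET_PQ_00 offset, which sets
	 * the PQ bits back to 00 (enabled); the cache-inhibited ldcix
	 * form is used when we are still in real mode and only the real
	 * address of that page is usable.
	 */
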
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all MCE errors
	 * (handled or unhandled) by exiting the guest with the
	 * KVM_EXIT_NMI exit reason.  This approach injects machine check
	 * errors into the guest address space with additional information
	 * in the form of an RTAS event, enabling the guest kernel to
	 * handle such errors suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back to
	 * the old behaviour for backward compatibility:
	 * deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest execution
	 * with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0), deliver it to the
	 * guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle the MCE? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means a PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt.  The trap reason is expected in r12 by the guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

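/*
 * Roughly, in C (an illustrative sketch only, not part of the build;
 * helper names are placeholders), kvmppc_check_wake_reason above does:
 *
 *	switch (wake_reason_field(mfspr(SPRN_SRR1))) {
 *	case 8:   return read_external_interrupt();	// 1, -1 or -2
 *	case 6:   return 0;				// decrementer
 *	case 5:   return 0;				// privileged doorbell
 *	case 3:   clear_doorbell();			// hypervisor doorbell
 *		  return host_ipi_pending() ? 1 : -1;
 *	case 0xa: trap = BOOK3S_INTERRUPT_HMI; return 1;
 *	default:  return 1;				// let the host decide
 *	}
 */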
/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * This can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
kvmppc_save_tm_hv:
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	std	r1, HSTATE_HOST_R1(r13)

	/* Clear the MSR RI since r1, r13 may be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
	/* Reload PACA pointer, stack pointer and TOC. */
	GET_PACA(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	HMT_MEDIUM
	ld	r6, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r6
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
kvmppc_restore_tm_hv:
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

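/*
 * Roughly, in C (an illustrative sketch only, not part of the build),
 * kvmppc_fix_pmao above does:
 *
 *	mtspr(SPRN_MMCR2, 0);
 *	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE |
 *			  MMCR0_PMCjCE | MMCR0_C56RUN);
 *	mtspr(SPRN_PMC6, 0x7fffffff);	// one tick from overflow
 *
 * PMC6 then overflows almost immediately, raising the performance
 * monitor interrupt that the broken MMCR0[PMAO] write failed to deliver.
 */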
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
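
/*
 * kvmhv_accumulate_time above uses a hand-rolled sequence count so that a
 * reader summing the statistics can detect a concurrent update.  Roughly,
 * in C (an illustrative sketch only, not part of the build; the field
 * names mirror the TAS_* offsets used above):
 *
 *	delta = now - activity_start;
 *	cur->seqcount++;		// odd: update in progress
 *	smp_wmb();
 *	cur->total += delta;
 *	if (first_sample || delta < cur->min)
 *		cur->min = delta;
 *	if (delta > cur->max)
 *		cur->max = delta;
 *	smp_wmb();
 *	cur->seqcount++;		// even again: update complete
 */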