/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

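	/*
	 * Note that MMCR0 is written last in the restore sequence above;
	 * MMCR0_FC was set on the exit path, so the host counters stay
	 * frozen until the PMCs and the other PMU SPRs are back in place.
	 */
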
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f			/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6, 0x7fff
	ori	r6, r6, 0xffff
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.
	 * The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/* Stack frame offsets */
#define STACK_SLOT_TID		(112-16)
#define STACK_SLOT_PSSCR	(112-24)

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b

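	/*
	 * The entry/exit word updated above packs two byte-sized fields:
	 * the low byte is a bitmap of threads that have entered the
	 * guest, and the next byte records threads that have started to
	 * exit, so any value >= 0x100 means an exit is already under way.
	 */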
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	lwz	r6,KVM_TLB_SETS(r9)
	li	r0,0			/* RS for P9 version of tlbiel */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

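	/*
	 * Secondary threads spinning on VCORE_IN_GUEST (in
	 * kvmppc_primary_no_guest above and in the wait loop before the
	 * guest LPCR is loaded below) can now proceed into the guest.
	 */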
	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* make the stop instruction trap to HV */
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we fall through and handle the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

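	/*
	 * In the wait loop above, clrldi extracts the entry byte and
	 * rlwinm the exit byte of the entry/exit word; the primary
	 * proceeds only once every thread that entered has also exited.
	 */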
	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler would have resync-ed
	 * the TB. Hence it is not required to subtract guest timebase
	 * offset from timebase. So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

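/*
 * Note on the dispatch below: hcall numbers are multiples of 4, so
 * hcall_real_table is indexed by (hcall number / 4).  Each entry is a
 * 32-bit offset of the real-mode handler relative to the start of the
 * table, with 0 meaning "no real-mode handler"; the enabled_hcalls[]
 * bitmap in kvm->arch is indexed the same way.
 */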
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
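/*
 * The code below saves the state that cede/nap must preserve for the
 * guest: the non-volatile GPRs, the FP/VMX/VSX state, the transactional
 * state if TM is available, and the guest decrementer expiry.  The
 * expiry is converted to a host timebase value, roughly (illustrative C
 * only, with field names implied by the asm offsets used below):
 *
 *	vcpu->dec_expires = (s32)guest_DEC + mftb() - vcore->tb_offset;
 */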
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	cmpw	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.
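
/*
 * Note on the sequence above: the store to HSTATE_SCRATCH0, the ptesync,
 * the reload and the (always-false) compare-and-loop appear to be there to
 * make sure our earlier stores (napping_threads, HSTATE_NAPPING,
 * hwthread_req) have been performed before this thread stops executing.
 * The "b ." should never be reached: the wakeup comes in through the
 * system reset vector, which presumably examines HSTATE_NAPPING to route
 * NAPPING_CEDE wakeups to kvm_end_cede and NAPPING_NOVCPU wakeups to
 * kvm_novcpu_wakeup.
 */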

	/*
	 * Another thread is already exiting the vcore: skip napping and
	 * fall through to clear our napping bit, with r4 = vcpu pointer,
	 * r3 = 0 (no wake reason) and r12 = 0 (no trap).
	 */
33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 *	r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down; do not modify r3
	 * until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when we have already been prodded */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont
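
/*
 * In outline, the H_CEDE handling above behaves roughly like this C
 * sketch (illustrative only; names mirror the asm above, not a real API):
 *
 *	vcpu->msr |= MSR_EE;
 *	vcpu->ceded = 1;
 *	if (vcpu->prodded) {				// kvm_cede_prodded
 *		vcpu->prodded = vcpu->ceded = 0;
 *		return H_SUCCESS;			// straight back to the guest
 *	}
 *	if (all other entered threads are already napping)	// kvm_cede_exit
 *		return to the host (guest_exit_cont);
 *	set our bit in vcore->napping_threads and nap;
 *	on wakeup (kvm_end_cede), restore state, clear the napping bit,
 *	then either re-enter the guest or exit to the host, depending on
 *	the wake reason and on whether other threads are exiting.
 */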

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution at the current
	 * HSRR0 instead of exiting the guest.  This approach injects a
	 * machine check into the guest for fatal errors, causing the guest
	 * to crash.
	 *
	 * The old code returned to the host for unhandled errors, which
	 * caused the guest to hang with soft lockups and made it difficult
	 * to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0), deliver it to the
	 * guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
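
/*
 * The decision above, roughly, as illustrative C (not the actual code):
 *
 *	if (guest_msr & MSR_HV)
 *		exit to the host (mc_cont);
 *	else if (!(guest_msr & MSR_RI) || !handled)
 *		deliver a 0x200 machine check to the guest;
 *	else
 *		resume the guest where it was interrupted;
 *
 * where "handled" is the value returned in r3 by
 * kvmppc_realmode_machine_check.
 */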

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt.  The trap reason is expected in r12 by the
	 * guest exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
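
/*
 * Outline of the TM context switch above and below, roughly (illustrative
 * pseudo-code only, not the actual flow):
 *
 *   save:    enable MSR[TM]; if the guest MSR shows a suspended/active
 *            transaction, treclaim with TM_CAUSE_KVM_RESCHED, which leaves
 *            the register file holding the checkpointed values; save those
 *            GPRs/SPRs/FP/VMX into the vcpu's *_TM fields; always save
 *            TFHAR/TFIAR/TEXASR (after the treclaim, so TEXASR records the
 *            failure cause).
 *
 *   restore: always restore TFHAR/TFIAR/TEXASR; if the guest was in a
 *            transaction, load the checkpointed *_TM state and trechkpt
 *            so the guest can resume or roll back the transaction.
 */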

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
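
/*
 * The trick above, informally: MMCR2 is cleared and MMCR0 is programmed to
 * enable performance monitor exceptions (PMXE) and freeze-on-event (FCECE),
 * then PMC6 is loaded with 0x7fffffff, one count below the overflow
 * threshold, so the very next event it counts overflows it and raises the
 * pending performance monitor interrupt that writing MMCR0[PMAO] alone
 * fails to generate on POWER8E.
 */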

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	/*
	 * Update the accumulation struct under a seqcount: the count is
	 * bumped to an odd value while the update is in progress and bumped
	 * again when it is done, with lwsync ordering the count updates
	 * against the data, so readers can retry if they see an odd or
	 * changing count.
	 */
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif