/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f			/* if not */
	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r7,XICS_XIRR		/* if it was an external interrupt, */
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	28f			/* none there? */
	cmpwi	r9,XICS_IPI		/* was it an IPI? */
	bne	29f
	li	r0,0xff
	li	r6,XICS_MFRR
	stbcix	r0,r5,r6		/* clear IPI */
	stwcix	r8,r5,r7		/* EOI the interrupt */
	sync				/* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest
	b	30f

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b	kvm_no_guest
28:	/* SRR1 said external but ICP said nope?? */
	b	kvm_no_guest
29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw	r8,HSTATE_SAVED_XIRR(r13)
	b	kvm_no_guest

30:	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync
	/* Clear any pending IPI - we're an offline thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
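	/*
	 * In rough C terms (a sketch; assumes the usual meaning of the
	 * VCPU_DEC_EXPIRES and VCPU_DEC offsets used just below):
	 *
	 *	dec = vcpu->arch.dec_expires - mftb();
	 *	mtspr(SPRN_DEC, dec);
	 *	vcpu->arch.dec = dec;
	 */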
368 */ 369 ld r8,VCPU_DEC_EXPIRES(r4) 370 mftb r7 371 subf r3,r7,r8 372 mtspr SPRN_DEC,r3 373 stw r3,VCPU_DEC(r4) 374 375 ld r5, VCPU_SPRG0(r4) 376 ld r6, VCPU_SPRG1(r4) 377 ld r7, VCPU_SPRG2(r4) 378 ld r8, VCPU_SPRG3(r4) 379 mtspr SPRN_SPRG0, r5 380 mtspr SPRN_SPRG1, r6 381 mtspr SPRN_SPRG2, r7 382 mtspr SPRN_SPRG3, r8 383 384 /* Save R1 in the PACA */ 385 std r1, HSTATE_HOST_R1(r13) 386 387 /* Load up DAR and DSISR */ 388 ld r5, VCPU_DAR(r4) 389 lwz r6, VCPU_DSISR(r4) 390 mtspr SPRN_DAR, r5 391 mtspr SPRN_DSISR, r6 392 393 li r6, KVM_GUEST_MODE_HOST_HV 394 stb r6, HSTATE_IN_GUEST(r13) 395 396BEGIN_FTR_SECTION 397 /* Restore AMR and UAMOR, set AMOR to all 1s */ 398 ld r5,VCPU_AMR(r4) 399 ld r6,VCPU_UAMOR(r4) 400 li r7,-1 401 mtspr SPRN_AMR,r5 402 mtspr SPRN_UAMOR,r6 403 mtspr SPRN_AMOR,r7 404END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 405 406 /* Clear out SLB */ 407 li r6,0 408 slbmte r6,r6 409 slbia 410 ptesync 411 412BEGIN_FTR_SECTION 413 b 30f 414END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 415 /* 416 * POWER7 host -> guest partition switch code. 417 * We don't have to lock against concurrent tlbies, 418 * but we do have to coordinate across hardware threads. 419 */ 420 /* Increment entry count iff exit count is zero. */ 421 ld r5,HSTATE_KVM_VCORE(r13) 422 addi r9,r5,VCORE_ENTRY_EXIT 42321: lwarx r3,0,r9 424 cmpwi r3,0x100 /* any threads starting to exit? */ 425 bge secondary_too_late /* if so we're too late to the party */ 426 addi r3,r3,1 427 stwcx. r3,0,r9 428 bne 21b 429 430 /* Primary thread switches to guest partition. */ 431 ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 432 lwz r6,VCPU_PTID(r4) 433 cmpwi r6,0 434 bne 20f 435 ld r6,KVM_SDR1(r9) 436 lwz r7,KVM_LPID(r9) 437 li r0,LPID_RSVD /* switch to reserved LPID */ 438 mtspr SPRN_LPID,r0 439 ptesync 440 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 441 mtspr SPRN_LPID,r7 442 isync 443 444 /* See if we need to flush the TLB */ 445 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ 446 clrldi r7,r6,64-6 /* extract bit number (6 bits) */ 447 srdi r6,r6,6 /* doubleword number */ 448 sldi r6,r6,3 /* address offset */ 449 add r6,r6,r9 450 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ 451 li r0,1 452 sld r0,r0,r7 453 ld r7,0(r6) 454 and. r7,r7,r0 455 beq 22f 45623: ldarx r7,0,r6 /* if set, clear the bit */ 457 andc r7,r7,r0 458 stdcx. r7,0,r6 459 bne 23b 460 li r6,128 /* and flush the TLB */ 461 mtctr r6 462 li r7,0x800 /* IS field = 0b10 */ 463 ptesync 46428: tlbiel r7 465 addi r7,r7,0x1000 466 bdnz 28b 467 ptesync 468 469 /* Add timebase offset onto timebase */ 47022: ld r8,VCORE_TB_OFFSET(r5) 471 cmpdi r8,0 472 beq 37f 473 mftb r6 /* current host timebase */ 474 add r8,r8,r6 475 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 476 mftb r7 /* check if lower 24 bits overflowed */ 477 clrldi r6,r6,40 478 clrldi r7,r7,40 479 cmpld r7,r6 480 bge 37f 481 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 482 mtspr SPRN_TBU40,r8 483 484 /* Load guest PCR value to select appropriate compat mode */ 48537: ld r7, VCORE_PCR(r5) 486 cmpdi r7, 0 487 beq 38f 488 mtspr SPRN_PCR, r7 48938: 490 li r0,1 491 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 492 b 10f 493 494 /* Secondary threads wait for primary to have done partition switch */ 49520: lbz r0,VCORE_IN_GUEST(r5) 496 cmpwi r0,0 497 beq 20b 498 499 /* Set LPCR and RMOR. 
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
747 */ 748 .globl kvmppc_interrupt_hv 749kvmppc_interrupt_hv: 750 /* 751 * Register contents: 752 * R12 = interrupt vector 753 * R13 = PACA 754 * guest CR, R12 saved in shadow VCPU SCRATCH1/0 755 * guest R13 saved in SPRN_SCRATCH0 756 */ 757 std r9, HSTATE_SCRATCH2(r13) 758 759 lbz r9, HSTATE_IN_GUEST(r13) 760 cmpwi r9, KVM_GUEST_MODE_HOST_HV 761 beq kvmppc_bad_host_intr 762#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 763 cmpwi r9, KVM_GUEST_MODE_GUEST 764 ld r9, HSTATE_SCRATCH2(r13) 765 beq kvmppc_interrupt_pr 766#endif 767 /* We're now back in the host but in guest MMU context */ 768 li r9, KVM_GUEST_MODE_HOST_HV 769 stb r9, HSTATE_IN_GUEST(r13) 770 771 ld r9, HSTATE_KVM_VCPU(r13) 772 773 /* Save registers */ 774 775 std r0, VCPU_GPR(R0)(r9) 776 std r1, VCPU_GPR(R1)(r9) 777 std r2, VCPU_GPR(R2)(r9) 778 std r3, VCPU_GPR(R3)(r9) 779 std r4, VCPU_GPR(R4)(r9) 780 std r5, VCPU_GPR(R5)(r9) 781 std r6, VCPU_GPR(R6)(r9) 782 std r7, VCPU_GPR(R7)(r9) 783 std r8, VCPU_GPR(R8)(r9) 784 ld r0, HSTATE_SCRATCH2(r13) 785 std r0, VCPU_GPR(R9)(r9) 786 std r10, VCPU_GPR(R10)(r9) 787 std r11, VCPU_GPR(R11)(r9) 788 ld r3, HSTATE_SCRATCH0(r13) 789 lwz r4, HSTATE_SCRATCH1(r13) 790 std r3, VCPU_GPR(R12)(r9) 791 stw r4, VCPU_CR(r9) 792BEGIN_FTR_SECTION 793 ld r3, HSTATE_CFAR(r13) 794 std r3, VCPU_CFAR(r9) 795END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 796BEGIN_FTR_SECTION 797 ld r4, HSTATE_PPR(r13) 798 std r4, VCPU_PPR(r9) 799END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 800 801 /* Restore R1/R2 so we can handle faults */ 802 ld r1, HSTATE_HOST_R1(r13) 803 ld r2, PACATOC(r13) 804 805 mfspr r10, SPRN_SRR0 806 mfspr r11, SPRN_SRR1 807 std r10, VCPU_SRR0(r9) 808 std r11, VCPU_SRR1(r9) 809 andi. r0, r12, 2 /* need to read HSRR0/1? */ 810 beq 1f 811 mfspr r10, SPRN_HSRR0 812 mfspr r11, SPRN_HSRR1 813 clrrdi r12, r12, 2 8141: std r10, VCPU_PC(r9) 815 std r11, VCPU_MSR(r9) 816 817 GET_SCRATCH0(r3) 818 mflr r4 819 std r3, VCPU_GPR(R13)(r9) 820 std r4, VCPU_LR(r9) 821 822 stw r12,VCPU_TRAP(r9) 823 824 /* Save HEIR (HV emulation assist reg) in last_inst 825 if this is an HEI (HV emulation interrupt, e40) */ 826 li r3,KVM_INST_FETCH_FAILED 827BEGIN_FTR_SECTION 828 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 829 bne 11f 830 mfspr r3,SPRN_HEIR 831END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 83211: stw r3,VCPU_LAST_INST(r9) 833 834 /* these are volatile across C function calls */ 835 mfctr r3 836 mfxer r4 837 std r3, VCPU_CTR(r9) 838 stw r4, VCPU_XER(r9) 839 840BEGIN_FTR_SECTION 841 /* If this is a page table miss then see if it's theirs or ours */ 842 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 843 beq kvmppc_hdsi 844 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 845 beq kvmppc_hisi 846END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 847 848 /* See if this is a leftover HDEC interrupt */ 849 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 850 bne 2f 851 mfspr r3,SPRN_HDEC 852 cmpwi r3,0 853 bge ignore_hdec 8542: 855 /* See if this is an hcall we can handle in real mode */ 856 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 857 beq hcall_try_real_mode 858 859 /* Only handle external interrupts here on arch 206 and later */ 860BEGIN_FTR_SECTION 861 b ext_interrupt_to_host 862END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) 863 864 /* External interrupt ? */ 865 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 866 bne+ ext_interrupt_to_host 867 868 /* External interrupt, first check for host_ipi. 
do_ext_interrupt:
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Alright, looks like an IPI for the guest, we need to set MER */
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	mfspr	r8, SPRN_LPCR
	ld	r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	beq	2f

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
	beq	2f
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_EXTERNAL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
2:	mr	r4, r9
	mtspr	SPRN_LPCR, r8
	b	fast_guest_return

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */
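	/*
	 * Encoding note (inferred from the checks in this file):
	 * entry_exit_count keeps the number of threads that entered the
	 * guest in its low byte and the number of threads exiting in the
	 * 0xff00 byte, so the stwcx. above adds 0x100 and the primary
	 * later spins until the two counts match.
	 */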

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current host timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
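	/*
	 * In effect (a sketch; exact field names depend on what VCPU_VPA
	 * and VCPU_VPA_DIRTY resolve to):
	 *
	 *	if (vpa) {
	 *		vpa->yield_count++;
	 *		mark the vpa dirty so the update gets flushed back;
	 *	}
	 */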
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	22b

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
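	/*
	 * The return value of .kvmppc_hpte_hv_fault drives the dispatch
	 * below: 0 means retry the guest instruction, -1 means handle in
	 * the kernel, -2 means MMIO emulation (fetch the instruction
	 * word), and any other value is a DSISR image to reflect to the
	 * guest as a DSI.  (Convention read off the comparisons below.)
	 */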
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
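/*
 * kvm_end_cede is reached from kvm_start_guest above when HSTATE_NAPPING
 * is set, i.e. this thread napped in the H_CEDE path rather than as an
 * offline secondary thread.
 */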

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3, SPRN_SRR1
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	mr	r9, r4
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	li	r3, 1
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.