/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.
	 * (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	ld	r6, 0(r6)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
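	/*
	 * Note (added): the repeated reads of HID0 below follow the usual
	 * PPC970 HID0 update sequence, which reads the register back several
	 * times after a write so that the new value is guaranteed to have
	 * taken effect before execution continues.
	 */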
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
31:
	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

BEGIN_FTR_SECTION
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
 BEGIN_FTR_SECTION_NESTED(89)
	isync
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 or PPC970 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7
#endif
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	ld	r11, VCPU_INTR_MSR(r4)
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi.
	 * If this is set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
#endif
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
	mfspr	r4, SPRN_MMCR2
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r4, VCPU_MMCR + 24(r9)
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current host timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
#endif
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	ld	r11, VCPU_INTR_MSR(r9)
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	ld	r11, VCPU_INTR_MSR(r9)
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	ld	r11, VCPU_INTR_MSR(r9)
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

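	/*
	 * Note (added): each entry below is a 32-bit offset from
	 * hcall_real_table, one per hcall number.  Since hcall numbers
	 * are multiples of 4, the hcall number itself is used directly
	 * as the byte index into this table by hcall_try_real_mode above;
	 * a zero entry means the hcall is not handled in real mode.
	 */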
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	.kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_INTR_MSR(r9)
	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	li	r3, 1
	blr

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	li	r3, 1
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r3,VCPU_FPRS
	bl	.store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	.store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtlr	r30
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r4,VCPU_FPRS
	bl	.load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	.load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.