/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f			/* if not */
	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r7,XICS_XIRR		/* if it was an external interrupt, */
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	28f			/* none there? */
	cmpwi	r9,XICS_IPI		/* was it an IPI? */
	bne	29f
	li	r0,0xff
	li	r6,XICS_MFRR
	stbcix	r0,r5,r6		/* clear IPI */
	stwcix	r8,r5,r7		/* EOI the interrupt */
	sync				/* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest
	b	kvmppc_hv_entry

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b	kvm_no_guest
28:	/* SRR1 said external but ICP said nope?? */
	b	kvm_no_guest
29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw	r8,HSTATE_SAVED_XIRR(r13)
	b	kvm_no_guest

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

22:	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
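	/* cr1.eq = no pending external (level) interrupt, cr0.eq = guest MSR_EE is clear */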
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
do_ext_interrupt:
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	ext_interrupt_to_host

	/* Now read the interrupt from the ICP */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r5, 0
	beq-	ext_interrupt_to_host
	lwzcix	r3, r5, r7
	rlwinm.	r0, r3, 0, 0xffffff
	sync
	beq	3f		/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r0, XICS_IPI
	bne	ext_stash_for_host

	/* It's an IPI, clear the MFRR and EOI it */
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	1f

	/* All right, looks like an IPI for the guest, we need to set MER */
3:
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	mfspr	r8, SPRN_LPCR
	ld	r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	beq	2f

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
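	/* cr0.eq: guest MSR_EE is 0, so go back to the guest with MER set in LPCR */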
	beq	2f
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_EXTERNAL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
2:	mr	r4, r9
	mtspr	SPRN_LPCR, r8
	b	fast_guest_return

	/* We raced with the host, we need to resend that IPI, bummer */
1:	li	r0, IPI_PRIORITY
	stbcix	r0, r5, r6		/* set the IPI */
	sync
	b	ext_interrupt_to_host

ext_stash_for_host:
	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r3, HSTATE_SAVED_XIRR(r13)
ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it got punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

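/*
 * We come here (from kvm_start_guest) when a thread that napped
 * after an H_CEDE is woken up; HSTATE_NAPPING was set before napping.
 */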
kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3, SPRN_SRR1
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	mr	r9, r4
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

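/*
 * Secondary threads branch (or fall through from secondary_too_late)
 * to here to go back into nap until they are needed again.
 */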
secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr