/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

#define XICS_XIRR		4
#define XICS_QIRR		0xc

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	/* were we napping due to cede? */
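	/*
	 * HSTATE_NAPPING is set by kvmppc_h_cede before a thread naps
	 * on behalf of a ceded vcpu; if it is set here, this wakeup
	 * belongs to the cede path rather than a fresh guest entry.
	 */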
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* We got here with an IPI; clear it */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	li	r7, XICS_XIRR
	lwzcix	r8, r5, r7		/* ack the interrupt */
	sync
	stbcix	r0, r5, r6		/* clear it */
	stwcix	r8, r5, r7		/* EOI it */

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0, PACA_NAPSTATELOST(r13)

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
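	/*
	 * The guest DEC is kept as an expiry time in timebase units,
	 * so program the real DEC with expiry minus the current
	 * timebase reading.
	 */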
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 * XXX maybe only need this on primary thread?
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	rldimi	r6,r5,0,62		/* XXX map as if threads 1:1 p:v */
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
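	/* if both checks pass the TLB holds no stale entries; skip the flush */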
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
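	/* cr1.eq = no pending external irq; cr0.eq = guest MSR_EE clear */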
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
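	/*
	 * A mediated external is one we synthesized by setting LPCR_MER
	 * while the guest was running with MSR_EE clear; if EE is now
	 * set, redirect it straight to the guest's 0x500 handler.
	 */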
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4		/* shift by thread number, not a constant */
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_QIRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
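	/* reservation lost, retry taking the lock */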
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR in the vcpu struct */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
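	/* nonzero PMCINUSE means the guest asked for the full PMU state */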
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r3)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
12:	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:
BEGIN_FTR_SECTION
	b	12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
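/*
 * Hcall numbers are multiples of 4 and hcall_real_table holds one
 * 32-bit table-relative entry per hcall, so the hcall number can be
 * used directly as a byte index into the table; a zero entry means
 * the hcall is not handled in real mode.
 */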
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	hcall_real_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
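	/* the vcpu will resume with external interrupts enabled */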
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	1f
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(r3)(r3)
BEGIN_FTR_SECTION
	b	2f		/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(r7,r4)
	cmpw	r7,r8
	bge	2f
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r3)
	std	r15, VCPU_GPR(r15)(r3)
	std	r16, VCPU_GPR(r16)(r3)
	std	r17, VCPU_GPR(r17)(r3)
	std	r18, VCPU_GPR(r18)(r3)
	std	r19, VCPU_GPR(r19)(r3)
	std	r20, VCPU_GPR(r20)(r3)
	std	r21, VCPU_GPR(r21)(r3)
	std	r22, VCPU_GPR(r22)(r3)
	std	r23, VCPU_GPR(r23)(r3)
	std	r24, VCPU_GPR(r24)(r3)
	std	r25, VCPU_GPR(r25)(r3)
	std	r26, VCPU_GPR(r26)(r3)
	std	r27, VCPU_GPR(r27)(r3)
	std	r28, VCPU_GPR(r28)(r3)
	std	r29, VCPU_GPR(r29)(r3)
	std	r30, VCPU_GPR(r30)(r3)
	std	r31, VCPU_GPR(r31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,0x80
	stb	r0,PACAPROCSTART(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* If we're a secondary thread and we got here by an IPI, ack it */
	ld	r4,HSTATE_KVM_VCPU(r13)
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
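	/* wake reason 4 is an external interrupt; only that needs an EOI */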
	bne	27f
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_QIRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7		/* ack the interrupt */
	sync
	stbcix	r0,r5,r6		/* clear it */
	stwcix	r8,r5,r7		/* EOI it */
27:
	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
1:	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
2:	li	r3,H_TOO_HARD
	blr

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
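 * (FPSCR, VSCR and VRSAVE are saved as well.)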
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,r6,r3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,r7,r4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr