/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1         0  /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK  ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

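/* Each KVM_HANDLER stub below stashes r4 in a per-class scratch SPRG, picks
 * up the vcpu pointer from SPRN_SPRG_RVCPU, saves just enough volatile
 * registers to get working space, records the exit number and the guest PC,
 * and branches to kvmppc_resume_host.  The branch goes through CTR with an
 * absolute address, so a stub keeps working when it runs from the
 * kvmppc_booke_handlers area that lightweight_exit installs in IVPR rather
 * than from its link-time address. */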
.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        /* Get pointer to vcpu and record exit number. */
        mtspr   \scratch, r4
        mfspr   r4, SPRN_SPRG_RVCPU
        stw     r3, VCPU_GPR(r3)(r4)
        stw     r5, VCPU_GPR(r5)(r4)
        stw     r6, VCPU_GPR(r6)(r4)
        mfspr   r3, \scratch
        mfctr   r5
        stw     r3, VCPU_GPR(r4)(r4)
        stw     r5, VCPU_CTR(r4)
        mfspr   r3, \srr0
        lis     r6, kvmppc_resume_host@h
        stw     r3, VCPU_PC(r4)
        li      r5, \ivor_nr
        ori     r6, r6, kvmppc_resume_host@l
        mtctr   r6
        bctr
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0

_GLOBAL(kvmppc_handler_len)
        .long kvmppc_handler_1 - kvmppc_handler_0

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        mfcr    r3
        stw     r3, VCPU_CR(r4)
        stw     r7, VCPU_GPR(r7)(r4)
        stw     r8, VCPU_GPR(r8)(r4)
        stw     r9, VCPU_GPR(r9)(r4)

        li      r6, 1
        slw     r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
1:
        mfspr   r7, SPRN_TBRU
        mfspr   r8, SPRN_TBRL
        mfspr   r9, SPRN_TBRU
        cmpw    r9, r7
        bne     1b
        stw     r8, VCPU_TIMING_EXIT_TBL(r4)
        stw     r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

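        /* Reading the faulting instruction means loading from the guest's
         * address space: SPRN_PID still holds the guest's shadow PID at this
         * point and the shadow TLB code installs guest translations with
         * TS=1, so temporarily setting MSR[DS] below makes the lwz of SRR0's
         * target go through the guest's TLB mappings. */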
        /* Save the faulting instruction and all GPRs for emulation. */
        andi.   r7, r6, NEED_INST_MASK
        beq     ..skip_inst_copy
        mfspr   r9, SPRN_SRR0
        mfmsr   r8
        ori     r7, r8, MSR_DS
        mtmsr   r7
        isync
        lwz     r9, 0(r9)
        mtmsr   r8
        isync
        stw     r9, VCPU_LAST_INST(r4)

        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi.   r7, r6, NEED_DEAR_MASK
        beq     ..skip_dear
        mfspr   r9, SPRN_DEAR
        stw     r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi.   r7, r6, NEED_ESR_MASK
        beq     ..skip_esr
        mfspr   r9, SPRN_ESR
        stw     r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw     r0, VCPU_GPR(r0)(r4)
        stw     r1, VCPU_GPR(r1)(r4)
        stw     r2, VCPU_GPR(r2)(r4)
        stw     r10, VCPU_GPR(r10)(r4)
        stw     r11, VCPU_GPR(r11)(r4)
        stw     r12, VCPU_GPR(r12)(r4)
        stw     r13, VCPU_GPR(r13)(r4)
        stw     r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
        mflr    r3
        stw     r3, VCPU_LR(r4)
        mfxer   r3
        stw     r3, VCPU_XER(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz     r1, VCPU_HOST_STACK(r4)
        lwz     r3, VCPU_HOST_PID(r4)
        mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        /* we cheat and know that Linux doesn't use PID1 which is always 0 */
        lis     r3, 0
        mtspr   SPRN_PID1, r3
#endif

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis     r3, 0xc000
        mtspr   SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr   r3
        lwz     r3, HOST_RUN(r1)
        lwz     r2, HOST_R2(r1)
        mr      r14, r4 /* Save vcpu pointer. */

        bctrl   /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr      r4, r14
        lwz     r14, VCPU_GPR(r14)(r4)

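        /* r3 now holds the RESUME_* code returned by kvmppc_handle_exit():
         * the low bits are the RESUME_FLAG_NV and RESUME_FLAG_HOST flags
         * tested below, and for host exits the value destined for
         * kvm_vcpu_run() sits above them (recovered by the srawi). */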
        /* Sometimes instruction emulation must restore complete GPR state. */
        andi.   r5, r3, RESUME_FLAG_NV
        beq     ..skip_nv_load
        lwz     r15, VCPU_GPR(r15)(r4)
        lwz     r16, VCPU_GPR(r16)(r4)
        lwz     r17, VCPU_GPR(r17)(r4)
        lwz     r18, VCPU_GPR(r18)(r4)
        lwz     r19, VCPU_GPR(r19)(r4)
        lwz     r20, VCPU_GPR(r20)(r4)
        lwz     r21, VCPU_GPR(r21)(r4)
        lwz     r22, VCPU_GPR(r22)(r4)
        lwz     r23, VCPU_GPR(r23)(r4)
        lwz     r24, VCPU_GPR(r24)(r4)
        lwz     r25, VCPU_GPR(r25)(r4)
        lwz     r26, VCPU_GPR(r26)(r4)
        lwz     r27, VCPU_GPR(r27)(r4)
        lwz     r28, VCPU_GPR(r28)(r4)
        lwz     r29, VCPU_GPR(r29)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

        /* Should we return to the guest? */
        andi.   r5, r3, RESUME_FLAG_HOST
        beq     lightweight_exit

        srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

#ifdef CONFIG_SPE
        /* save guest SPEFSCR and load host SPEFSCR */
        mfspr   r9, SPRN_SPEFSCR
        stw     r9, VCPU_SPEFSCR(r4)
        lwz     r9, VCPU_HOST_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r9
#endif

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz     r14, HOST_NV_GPR(r14)(r1)
        lwz     r15, HOST_NV_GPR(r15)(r1)
        lwz     r16, HOST_NV_GPR(r16)(r1)
        lwz     r17, HOST_NV_GPR(r17)(r1)
        lwz     r18, HOST_NV_GPR(r18)(r1)
        lwz     r19, HOST_NV_GPR(r19)(r1)
        lwz     r20, HOST_NV_GPR(r20)(r1)
        lwz     r21, HOST_NV_GPR(r21)(r1)
        lwz     r22, HOST_NV_GPR(r22)(r1)
        lwz     r23, HOST_NV_GPR(r23)(r1)
        lwz     r24, HOST_NV_GPR(r24)(r1)
        lwz     r25, HOST_NV_GPR(r25)(r1)
        lwz     r26, HOST_NV_GPR(r26)(r1)
        lwz     r27, HOST_NV_GPR(r27)(r1)
        lwz     r28, HOST_NV_GPR(r28)(r1)
        lwz     r29, HOST_NV_GPR(r29)(r1)
        lwz     r30, HOST_NV_GPR(r30)(r1)
        lwz     r31, HOST_NV_GPR(r31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz     r4, HOST_STACK_LR(r1)
        lwz     r5, HOST_CR(r1)
        addi    r1, r1, HOST_STACK_SIZE
        mtlr    r4
        mtcr    r5
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu    r1, -HOST_STACK_SIZE(r1)
        stw     r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw     r3, HOST_RUN(r1)
        mflr    r3
        stw     r3, HOST_STACK_LR(r1)
        mfcr    r5
        stw     r5, HOST_CR(r1)

        /* Save host non-volatile register state to stack. */
        stw     r14, HOST_NV_GPR(r14)(r1)
        stw     r15, HOST_NV_GPR(r15)(r1)
        stw     r16, HOST_NV_GPR(r16)(r1)
        stw     r17, HOST_NV_GPR(r17)(r1)
        stw     r18, HOST_NV_GPR(r18)(r1)
        stw     r19, HOST_NV_GPR(r19)(r1)
        stw     r20, HOST_NV_GPR(r20)(r1)
        stw     r21, HOST_NV_GPR(r21)(r1)
        stw     r22, HOST_NV_GPR(r22)(r1)
        stw     r23, HOST_NV_GPR(r23)(r1)
        stw     r24, HOST_NV_GPR(r24)(r1)
        stw     r25, HOST_NV_GPR(r25)(r1)
        stw     r26, HOST_NV_GPR(r26)(r1)
        stw     r27, HOST_NV_GPR(r27)(r1)
        stw     r28, HOST_NV_GPR(r28)(r1)
        stw     r29, HOST_NV_GPR(r29)(r1)
        stw     r30, HOST_NV_GPR(r30)(r1)
        stw     r31, HOST_NV_GPR(r31)(r1)

        /* Load guest non-volatiles. */
        lwz     r14, VCPU_GPR(r14)(r4)
        lwz     r15, VCPU_GPR(r15)(r4)
        lwz     r16, VCPU_GPR(r16)(r4)
        lwz     r17, VCPU_GPR(r17)(r4)
        lwz     r18, VCPU_GPR(r18)(r4)
        lwz     r19, VCPU_GPR(r19)(r4)
        lwz     r20, VCPU_GPR(r20)(r4)
        lwz     r21, VCPU_GPR(r21)(r4)
        lwz     r22, VCPU_GPR(r22)(r4)
        lwz     r23, VCPU_GPR(r23)(r4)
        lwz     r24, VCPU_GPR(r24)(r4)
        lwz     r25, VCPU_GPR(r25)(r4)
        lwz     r26, VCPU_GPR(r26)(r4)
        lwz     r27, VCPU_GPR(r27)(r4)
        lwz     r28, VCPU_GPR(r28)(r4)
        lwz     r29, VCPU_GPR(r29)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)

#ifdef CONFIG_SPE
        /* save host SPEFSCR and load guest SPEFSCR */
        mfspr   r3, SPRN_SPEFSCR
        stw     r3, VCPU_HOST_SPEFSCR(r4)
        lwz     r3, VCPU_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r3
#endif

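/* lightweight_exit: switch the MMU context (PID, IVPR, guest SPRGs) and the
 * volatile register state back to the guest and rfi into it.  This path is
 * reached both by falling through from __kvmppc_vcpu_run above and when
 * kvmppc_handle_exit() resumes the guest without RESUME_FLAG_HOST; in either
 * case the guest non-volatile GPRs are already in place (or were just
 * reloaded under RESUME_FLAG_NV). */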
lightweight_exit:
        stw     r2, HOST_R2(r1)

        mfspr   r3, SPRN_PID
        stw     r3, VCPU_HOST_PID(r4)
        lwz     r3, VCPU_SHADOW_PID(r4)
        mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        lwz     r3, VCPU_SHADOW_PID1(r4)
        mtspr   SPRN_PID1, r3
#endif

#ifdef CONFIG_44x
        iccci   0, 0 /* XXX hack */
#endif

        /* Load some guest volatiles. */
        lwz     r0, VCPU_GPR(r0)(r4)
        lwz     r2, VCPU_GPR(r2)(r4)
        lwz     r9, VCPU_GPR(r9)(r4)
        lwz     r10, VCPU_GPR(r10)(r4)
        lwz     r11, VCPU_GPR(r11)(r4)
        lwz     r12, VCPU_GPR(r12)(r4)
        lwz     r13, VCPU_GPR(r13)(r4)
        lwz     r3, VCPU_LR(r4)
        mtlr    r3
        lwz     r3, VCPU_XER(r4)
        mtxer   r3

        /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
         * so how do we make sure vcpu won't fault? */
        lis     r8, kvmppc_booke_handlers@ha
        lwz     r8, kvmppc_booke_handlers@l(r8)
        mtspr   SPRN_IVPR, r8

        /* Save vcpu pointer for the exception handlers. */
        mtspr   SPRN_SPRG_WVCPU, r4

        lwz     r5, VCPU_SHARED(r4)

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz     r1, VCPU_GPR(r1)(r4)

        /*
         * Host interrupt handlers may have clobbered these
         * guest-readable SPRGs, or the guest kernel may have
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
        PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr   SPRN_SPRG4W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr   SPRN_SPRG5W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr   SPRN_SPRG6W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr   SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
1:
        mfspr   r6, SPRN_TBRU
        mfspr   r7, SPRN_TBRL
        mfspr   r8, SPRN_TBRU
        cmpw    r8, r6
        bne     1b
        stw     r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz     r3, VCPU_CTR(r4)
        lwz     r5, VCPU_CR(r4)
        lwz     r6, VCPU_PC(r4)
        lwz     r7, VCPU_SHADOW_MSR(r4)
        mtctr   r3
        mtcr    r5
        mtsrr0  r6
        mtsrr1  r7
        lwz     r5, VCPU_GPR(r5)(r4)
        lwz     r6, VCPU_GPR(r6)(r4)
        lwz     r7, VCPU_GPR(r7)(r4)
        lwz     r8, VCPU_GPR(r8)(r4)

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis     r3, 0xffff
        ori     r3, r3, 0xffff
        mtspr   SPRN_DBSR, r3

        lwz     r3, VCPU_GPR(r3)(r4)
        lwz     r4, VCPU_GPR(r4)(r4)
        rfi

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
        cmpi    0,r3,0
        beqlr-
        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
        evxor   evr6, evr6, evr6
        evmwumiaa evr6, evr6, evr6
        li      r4,VCPU_ACC
        evstddx evr6, r4, r3 /* save acc */
        blr

_GLOBAL(kvmppc_load_guest_spe)
        cmpi    0,r3,0
        beqlr-
        li      r4,VCPU_ACC
        evlddx  evr6,r4,r3
        evmra   evr6,evr6 /* load acc */
        REST_32EVRS(0, r4, r3, VCPU_EVR)
        blr
#endif