/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Checkpoint the current register state into the vcpu's TM shadow copies */
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

/* Restore the register state from the vcpu's TM shadow (checkpointed) copies */
static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on the Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to quit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* Currently we only emulate tabort.; the other tabort variants
	 * are not emulated since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit, and
	 * tabort is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
	    MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Add rules to fit the ISA specification regarding TM
			 * state transitions in TM disabled/suspended state,
			 * when the target TM state is TM inactive (00): the
			 * change should be suppressed.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* Only emulate for a privileged guest, since a
			 * problem-state guest can run with TM enabled and we
			 * don't expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
		    !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
			    (sprn == SPRN_TFHAR))) {
			/* It is illegal to mtspr() TM regs in any state
			 * other than non-transactional, with the exception
			 * of TFHAR in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can too.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}