/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
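	/*
	 * Primary opcode 19: of the XL-form instructions, only rfi and
	 * rfid need emulation here. Both return from an interrupt by
	 * reloading PC from SRR0 and MSR from SRR1; *advance is cleared
	 * because the new PC is set explicitly.
	 */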
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
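		/*
		 * tlbie and tlbiel are both forwarded to the virtual
		 * MMU's tlbie() callback with the effective address
		 * from rb; the 0x00200000 instruction bit selects a
		 * large-page invalidation.
		 */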
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

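/*
 * Decode one 32-bit half of a BAT register pair into the cached
 * fields: the upper register carries BEPI, the block length mask and
 * the Vs/Vp valid bits, the lower register carries BRPN, WIMG and PP.
 * The raw value is preserved as well, so mfspr can hand it back
 * verbatim.
 */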
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_PURR:
		to_book3s(vcpu)->purr_offset = spr_val - get_tb();
		break;
	case SPRN_SPURR:
		to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
		break;
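	/*
	 * GQR0-7 are the Gekko/Broadway graphics quantization
	 * registers used by paired-single loads and stores; the
	 * paired-single emulation only needs their values shadowed
	 * in the vcpu state.
	 */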
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
	case SPRN_FSCR:
		vcpu->arch.fscr = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		vcpu->arch.tfhar = spr_val;
		break;
	case SPRN_TEXASR:
		vcpu->arch.texasr = spr_val;
		break;
	case SPRN_TFIAR:
		vcpu->arch.tfiar = spr_val;
		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
#endif
		break;
	unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		*spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
		break;
	case SPRN_SPURR:
		*spr_val = get_tb() + to_book3s(vcpu)->spurr_offset;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
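	/*
	 * The transactional memory checkpoint SPRs (TFHAR, TEXASR,
	 * TFIAR) are simply read back from the vcpu->arch shadow
	 * copies that the mtspr emulation above maintains.
	 */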
	case SPRN_TFHAR:
		*spr_val = vcpu->arch.tfhar;
		break;
	case SPRN_TEXASR:
		*spr_val = vcpu->arch.texasr;
		break;
	case SPRN_TFIAR:
		*spr_val = vcpu->arch.tfiar;
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
	unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so can we
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}