/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include "book3s.h"

#define OP_19_XOP_RFID          18
#define OP_19_XOP_RFI           50

#define OP_31_XOP_MFMSR         83
#define OP_31_XOP_MTMSR         146
#define OP_31_XOP_MTMSRD        178
#define OP_31_XOP_MTSR          210
#define OP_31_XOP_MTSRIN        242
#define OP_31_XOP_TLBIEL        274
#define OP_31_XOP_TLBIE         306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1      308
#define OP_31_XOP_SLBMTE        402
#define OP_31_XOP_SLBIE         434
#define OP_31_XOP_SLBIA         498
#define OP_31_XOP_MFSR          595
#define OP_31_XOP_MFSRIN        659
#define OP_31_XOP_DCBA          758
#define OP_31_XOP_SLBMFEV       851
#define OP_31_XOP_EIOIO         854
#define OP_31_XOP_SLBMFEE       915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ          1010

#define OP_LFS                  48
#define OP_LFD                  50
#define OP_STFS                 52
#define OP_STFD                 54

#define SPRN_GQR0               912
#define SPRN_GQR1               913
#define SPRN_GQR2               914
#define SPRN_GQR3               915
#define SPRN_GQR4               916
#define SPRN_GQR5               917
#define SPRN_GQR6               918
#define SPRN_GQR7               919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
        PRIV_PROBLEM = 0,
        PRIV_SUPER = 1,
        PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
        /* PAPR VMs only access supervisor SPRs */
        if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
                return false;

        /* Limit user space to its own small SPR set */
        if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
                return false;

        return true;
}
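
/*
 * Emulate a single privileged or trapping instruction on behalf of the
 * guest. Returns EMULATE_DONE, EMULATE_FAIL or EMULATE_EXIT_USER and
 * clears *advance when the guest PC must not be stepped past the
 * instruction (e.g. rfi, or a dcbz that faulted). Anything we cannot
 * handle here is handed to the paired single emulation as a last resort.
 */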
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int inst, int *advance)
{
        int emulated = EMULATE_DONE;
        int rt = get_rt(inst);
        int rs = get_rs(inst);
        int ra = get_ra(inst);
        int rb = get_rb(inst);
        u32 inst_sc = 0x44000002;

        switch (get_op(inst)) {
        case 0:
                emulated = EMULATE_FAIL;
                if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
                    (inst == swab32(inst_sc))) {
                        /*
                         * This is the byte reversed syscall instruction of our
                         * hypercall handler. Early versions of LE Linux didn't
                         * swap the instructions correctly and ended up
                         * executing illegal instructions.
                         * Just always fail hypercalls on these broken systems.
                         */
                        kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
                        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
                        emulated = EMULATE_DONE;
                }
                break;
        case 19:
                switch (get_xop(inst)) {
                case OP_19_XOP_RFID:
                case OP_19_XOP_RFI:
                        kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
                        kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
                        *advance = 0;
                        break;

                default:
                        emulated = EMULATE_FAIL;
                        break;
                }
                break;
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_MFMSR:
                        kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
                        break;
                case OP_31_XOP_MTMSRD:
                {
                        ulong rs_val = kvmppc_get_gpr(vcpu, rs);
                        if (inst & 0x10000) {
                                /* mtmsrd with L=1 only updates EE and RI */
                                ulong new_msr = kvmppc_get_msr(vcpu);
                                new_msr &= ~(MSR_RI | MSR_EE);
                                new_msr |= rs_val & (MSR_RI | MSR_EE);
                                kvmppc_set_msr_fast(vcpu, new_msr);
                        } else
                                kvmppc_set_msr(vcpu, rs_val);
                        break;
                }
                case OP_31_XOP_MTMSR:
                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MFSR:
                {
                        int srnum;

                        srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
                                kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
                case OP_31_XOP_MFSRIN:
                {
                        int srnum;

                        srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
                                kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
                case OP_31_XOP_MTSR:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (inst >> 16) & 0xf,
                                kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MTSRIN:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
                                kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_TLBIE:
                case OP_31_XOP_TLBIEL:
                {
                        bool large = (inst & 0x00200000) ? true : false;
                        ulong addr = kvmppc_get_gpr(vcpu, rb);
                        vcpu->arch.mmu.tlbie(vcpu, addr, large);
                        break;
                }
#ifdef CONFIG_PPC_BOOK3S_64
                case OP_31_XOP_FAKE_SC1:
                {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

                        if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
                            !vcpu->arch.papr_enabled) {
                                emulated = EMULATE_FAIL;
                                break;
                        }

                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
                                break;

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }

                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        emulated = EMULATE_EXIT_USER;
                        break;
                }
#endif
                case OP_31_XOP_EIOIO:
                        break;
                case OP_31_XOP_SLBMTE:
                        if (!vcpu->arch.mmu.slbmte)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbmte(vcpu,
                                        kvmppc_get_gpr(vcpu, rs),
                                        kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIE:
                        if (!vcpu->arch.mmu.slbie)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbie(vcpu,
                                        kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIA:
                        if (!vcpu->arch.mmu.slbia)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbia(vcpu);
                        break;
                case OP_31_XOP_SLBMFEE:
                        if (!vcpu->arch.mmu.slbmfee) {
                                emulated = EMULATE_FAIL;
                        } else {
                                ulong t, rb_val;

                                rb_val = kvmppc_get_gpr(vcpu, rb);
                                t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
                                kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_SLBMFEV:
                        if (!vcpu->arch.mmu.slbmfev) {
                                emulated = EMULATE_FAIL;
                        } else {
                                ulong t, rb_val;

                                rb_val = kvmppc_get_gpr(vcpu, rb);
                                t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
                                kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_DCBA:
                        /* Gets treated as NOP */
                        break;
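                /*
                 * dcbz only traps because its opcode was patched (see the
                 * OP_31_XOP_DCBZ define above). Zero the 32 byte cache
                 * line by hand and synthesize a data storage interrupt if
                 * the store fails, so the guest sees the fault on the
                 * dcbz instruction itself.
                 */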
                case OP_31_XOP_DCBZ:
                {
                        ulong rb_val = kvmppc_get_gpr(vcpu, rb);
                        ulong ra_val = 0;
                        ulong addr, vaddr;
                        u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
                        u32 dsisr;
                        int r;

                        if (ra)
                                ra_val = kvmppc_get_gpr(vcpu, ra);

                        addr = (ra_val + rb_val) & ~31ULL;
                        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                                addr &= 0xffffffff;
                        vaddr = addr;

                        r = kvmppc_st(vcpu, &addr, 32, zeros, true);
                        if ((r == -ENOENT) || (r == -EPERM)) {
                                *advance = 0;
                                kvmppc_set_dar(vcpu, vaddr);
                                vcpu->arch.fault_dar = vaddr;

                                dsisr = DSISR_ISSTORE;
                                if (r == -ENOENT)
                                        dsisr |= DSISR_NOHPTE;
                                else if (r == -EPERM)
                                        dsisr |= DSISR_PROTFAULT;

                                kvmppc_set_dsisr(vcpu, dsisr);
                                vcpu->arch.fault_dsisr = dsisr;

                                kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_DATA_STORAGE);
                        }

                        break;
                }
                default:
                        emulated = EMULATE_FAIL;
                }
                break;
        default:
                emulated = EMULATE_FAIL;
        }

        if (emulated == EMULATE_FAIL)
                emulated = kvmppc_emulate_paired_single(run, vcpu);

        return emulated;
}
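
/*
 * Split a 32 bit BAT register value into its architected fields: the
 * upper BAT word carries BEPI, the block length encoding and the Vs/Vp
 * valid bits, the lower BAT word carries BRPN, WIMG and PP. The raw
 * 64 bit value keeps both halves together for mfspr emulation.
 */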
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
        if (upper) {
                /* Upper BAT */
                u32 bl = (val >> 2) & 0x7ff;
                bat->bepi_mask = (~bl << 17);
                bat->bepi = val & 0xfffe0000;
                bat->vs = (val & 2) ? 1 : 0;
                bat->vp = (val & 1) ? 1 : 0;
                bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
        } else {
                /* Lower BAT */
                bat->brpn = val & 0xfffe0000;
                bat->wimg = (val >> 3) & 0xf;
                bat->pp = val & 3;
                bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
        }
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_bat *bat;

        /* Each BAT is a pair of SPRs (upper/lower), hence the divide by two */
        switch (sprn) {
        case SPRN_IBAT0U ... SPRN_IBAT3L:
                bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
                break;
        case SPRN_IBAT4U ... SPRN_IBAT7L:
                bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
                break;
        case SPRN_DBAT0U ... SPRN_DBAT3L:
                bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
                break;
        case SPRN_DBAT4U ... SPRN_DBAT7L:
                bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
                break;
        default:
                BUG();
        }

        return bat;
}
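
/*
 * mtspr emulation. Hypervisor resources are gated by spr_allowed();
 * SPRs we know about but do not model (performance monitor, thermal,
 * DABR, ...) are accepted and silently ignored so guests keep running.
 */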
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
        int emulated = EMULATE_DONE;

        switch (sprn) {
        case SPRN_SDR1:
                if (!spr_allowed(vcpu, PRIV_HYPER))
                        goto unprivileged;
                to_book3s(vcpu)->sdr1 = spr_val;
                break;
        case SPRN_DSISR:
                kvmppc_set_dsisr(vcpu, spr_val);
                break;
        case SPRN_DAR:
                kvmppc_set_dar(vcpu, spr_val);
                break;
        case SPRN_HIOR:
                to_book3s(vcpu)->hior = spr_val;
                break;
        case SPRN_IBAT0U ... SPRN_IBAT3L:
        case SPRN_IBAT4U ... SPRN_IBAT7L:
        case SPRN_DBAT0U ... SPRN_DBAT3L:
        case SPRN_DBAT4U ... SPRN_DBAT7L:
        {
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

                kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
                /* BAT writes happen so rarely that we're ok to flush
                 * everything here */
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
                break;
        }
        case SPRN_HID0:
                to_book3s(vcpu)->hid[0] = spr_val;
                break;
        case SPRN_HID1:
                to_book3s(vcpu)->hid[1] = spr_val;
                break;
        case SPRN_HID2:
                to_book3s(vcpu)->hid[2] = spr_val;
                break;
        case SPRN_HID2_GEKKO:
                to_book3s(vcpu)->hid[2] = spr_val;
                /* HID2.PSE controls paired single on gekko */
                switch (vcpu->arch.pvr) {
                case 0x00080200:        /* lonestar 2.0 */
                case 0x00088202:        /* lonestar 2.2 */
                case 0x70000100:        /* gekko 1.0 */
                case 0x00080100:        /* gekko 2.0 */
                case 0x00083203:        /* gekko 2.3a */
                case 0x00083213:        /* gekko 2.3b */
                case 0x00083204:        /* gekko 2.4 */
                case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
                case 0x00087200:        /* broadway */
                        if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
                                /* Native paired singles */
                        } else if (spr_val & (1 << 29)) { /* HID2.PSE */
                                vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
                                kvmppc_giveup_ext(vcpu, MSR_FP);
                        } else {
                                vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
                        }
                        break;
                }
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
                to_book3s(vcpu)->hid[4] = spr_val;
                break;
        case SPRN_HID5:
                to_book3s(vcpu)->hid[5] = spr_val;
                /* setting HID5 in the guest can change is_dcbz32 */
                if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                    (mfmsr() & MSR_HV))
                        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
        case SPRN_GQR2:
        case SPRN_GQR3:
        case SPRN_GQR4:
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
                to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case SPRN_FSCR:
                kvmppc_set_fscr(vcpu, spr_val);
                break;
        case SPRN_BESCR:
                vcpu->arch.bescr = spr_val;
                break;
        case SPRN_EBBHR:
                vcpu->arch.ebbhr = spr_val;
                break;
        case SPRN_EBBRR:
                vcpu->arch.ebbrr = spr_val;
                break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case SPRN_TFHAR:
                vcpu->arch.tfhar = spr_val;
                break;
        case SPRN_TEXASR:
                vcpu->arch.texasr = spr_val;
                break;
        case SPRN_TFIAR:
                vcpu->arch.tfiar = spr_val;
                break;
#endif
#endif
        case SPRN_ICTC:
        case SPRN_THRM1:
        case SPRN_THRM2:
        case SPRN_THRM3:
        case SPRN_CTRLF:
        case SPRN_CTRLT:
        case SPRN_L2CR:
        case SPRN_DSCR:
        case SPRN_MMCR0_GEKKO:
        case SPRN_MMCR1_GEKKO:
        case SPRN_PMC1_GEKKO:
        case SPRN_PMC2_GEKKO:
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
        case SPRN_MSSSR0:
        case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
        case SPRN_MMCRS:
        case SPRN_MMCRA:
        case SPRN_MMCR0:
        case SPRN_MMCR1:
        case SPRN_MMCR2:
#endif
                /* Writes to these SPRs are silently ignored */
                break;
unprivileged:
        default:
                printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
                emulated = EMULATE_FAIL;
#endif
                break;
        }

        return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
        int emulated = EMULATE_DONE;

        switch (sprn) {
        case SPRN_IBAT0U ... SPRN_IBAT3L:
        case SPRN_IBAT4U ... SPRN_IBAT7L:
        case SPRN_DBAT0U ... SPRN_DBAT3L:
        case SPRN_DBAT4U ... SPRN_DBAT7L:
        {
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

                if (sprn % 2)
                        *spr_val = bat->raw >> 32;
                else
                        *spr_val = bat->raw;

                break;
        }
        case SPRN_SDR1:
                if (!spr_allowed(vcpu, PRIV_HYPER))
                        goto unprivileged;
                *spr_val = to_book3s(vcpu)->sdr1;
                break;
        case SPRN_DSISR:
                *spr_val = kvmppc_get_dsisr(vcpu);
                break;
        case SPRN_DAR:
                *spr_val = kvmppc_get_dar(vcpu);
                break;
        case SPRN_HIOR:
                *spr_val = to_book3s(vcpu)->hior;
                break;
        case SPRN_HID0:
                *spr_val = to_book3s(vcpu)->hid[0];
                break;
        case SPRN_HID1:
                *spr_val = to_book3s(vcpu)->hid[1];
                break;
        case SPRN_HID2:
        case SPRN_HID2_GEKKO:
                *spr_val = to_book3s(vcpu)->hid[2];
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
                *spr_val = to_book3s(vcpu)->hid[4];
                break;
        case SPRN_HID5:
                *spr_val = to_book3s(vcpu)->hid[5];
                break;
        case SPRN_CFAR:
        case SPRN_DSCR:
                *spr_val = 0;
                break;
        case SPRN_PURR:
                /*
                 * On exit we would have updated purr
                 */
                *spr_val = vcpu->arch.purr;
                break;
        case SPRN_SPURR:
                /*
                 * On exit we would have updated spurr
                 */
                *spr_val = vcpu->arch.spurr;
                break;
        case SPRN_VTB:
                *spr_val = vcpu->arch.vtb;
                break;
        case SPRN_IC:
                *spr_val = vcpu->arch.ic;
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
        case SPRN_GQR2:
        case SPRN_GQR3:
        case SPRN_GQR4:
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
                *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case SPRN_FSCR:
                *spr_val = vcpu->arch.fscr;
                break;
        case SPRN_BESCR:
                *spr_val = vcpu->arch.bescr;
                break;
        case SPRN_EBBHR:
                *spr_val = vcpu->arch.ebbhr;
                break;
        case SPRN_EBBRR:
                *spr_val = vcpu->arch.ebbrr;
                break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case SPRN_TFHAR:
                *spr_val = vcpu->arch.tfhar;
                break;
        case SPRN_TEXASR:
                *spr_val = vcpu->arch.texasr;
                break;
        case SPRN_TFIAR:
                *spr_val = vcpu->arch.tfiar;
                break;
#endif
#endif
        case SPRN_THRM1:
        case SPRN_THRM2:
        case SPRN_THRM3:
        case SPRN_CTRLF:
        case SPRN_CTRLT:
        case SPRN_L2CR:
        case SPRN_MMCR0_GEKKO:
        case SPRN_MMCR1_GEKKO:
        case SPRN_PMC1_GEKKO:
        case SPRN_PMC2_GEKKO:
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
        case SPRN_MSSSR0:
        case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
        case SPRN_MMCRS:
        case SPRN_MMCRA:
        case SPRN_MMCR0:
        case SPRN_MMCR1:
        case SPRN_MMCR2:
        case SPRN_TIR:
#endif
                *spr_val = 0;
                break;
        default:
        unprivileged:
                printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
                emulated = EMULATE_FAIL;
#endif
                break;
        }

        return emulated;
}
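
/*
 * Alignment interrupt forwarding: compute the DSISR and DAR values the
 * guest expects to see for the instruction that took the alignment fault.
 */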
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
        return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Linux's fix_alignment() assumes that DAR is valid, and so can we
         */
        return vcpu->arch.fault_dar;
#else
        ulong dar = 0;
        ulong ra = get_ra(inst);
        ulong rb = get_rb(inst);

        switch (get_op(inst)) {
        case OP_LFS:
        case OP_LFD:
        case OP_STFD:
        case OP_STFS:
                /* D-form FP load/store: EA = (ra|0) + sign-extended offset */
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += (s32)((s16)inst);
                break;
        case 31:
                /* X-form: EA = (ra|0) + rb */
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += kvmppc_get_gpr(vcpu, rb);
                break;
        default:
                printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
                break;
        }

        return dar;
#endif
}