/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/*
 * Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define.
 */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

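/*
 * Emulate a privileged or paravirtualized instruction that trapped while
 * the guest was running.  On return, *advance tells the caller whether to
 * step the guest PC past the instruction; handlers that load a new PC
 * themselves (rfi/rfid) or re-deliver the fault to the guest (dcbz)
 * clear it.
 */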
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;	/* encoding of "sc" */

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte-reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				/* mtmsrd with L=1 only updates EE and RI */
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
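		/*
		 * TLB invalidations are forwarded to the virtual MMU
		 * implementation; the instruction's L field selects a
		 * large-page invalidate.
		 */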
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* sc 1 PAPR hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			/* Try to handle the hypercall in the kernel first */
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			/* Otherwise hand it to user space */
			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			/* Emulate a 32-byte cache line dcbz */
			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				/* Reflect the store fault back to the guest */
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

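/*
 * Guest BAT management.  A 32-bit write to one half of a BAT pair is
 * split into the fields of our shadow kvmppc_bat: the upper word holds
 * BEPI, BL, Vs and Vp, the lower word BRPN, WIMG and PP, matching the
 * 32-bit Book3S BAT register layout.
 */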
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/*
		 * BAT writes happen so rarely that we're OK to flush
		 * everything here.
		 */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired singles on Gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* A guest write to HID5 can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
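	/*
	 * The facility status/control and event-based-branch SPRs only
	 * exist on 64-bit Book3S; the TM checkpoint SPRs additionally
	 * require transactional memory support.
	 */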
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		vcpu->arch.tfhar = spr_val;
		break;
	case SPRN_TEXASR:
		vcpu->arch.texasr = spr_val;
		break;
	case SPRN_TFIAR:
		vcpu->arch.tfiar = spr_val;
		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
#endif
		/* Writes to these SPRs are silently ignored */
		break;
unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

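/*
 * Read side of the SPR emulation: SPRs we shadow return their stored
 * value, while the SPRs whose writes are ignored above read as zero.
 */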
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/* PURR was updated on the way out of the guest */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/* SPURR was updated on the way out of the guest */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = vcpu->arch.vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		*spr_val = vcpu->arch.tfhar;
		break;
	case SPRN_TEXASR:
		*spr_val = vcpu->arch.texasr;
		break;
	case SPRN_TFIAR:
		*spr_val = vcpu->arch.tfiar;
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can
	 * assume the same here.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		/* D-form: EA = (ra|0) + sign-extended displacement */
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		/* X-form: EA = (ra|0) + (rb) */
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}