/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include "book3s.h"

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

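/*
 * Emulate one privileged (or paravirtualized) instruction that trapped
 * into the host.  Anything we fail to handle here is handed on to the
 * paired-single emulator as a last resort (see the end of this function).
 */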
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte-reversed syscall instruction of our
			 * hypercall handler.  Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
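		/*
		 * dcbz in the guest is patched to opcode 1010 (see the
		 * OP_31_XOP_DCBZ define above) so that it traps here.  We
		 * emulate it by storing a zeroed 32-byte line; if the store
		 * faults, we synthesize a data storage interrupt for the
		 * guest instead.
		 */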
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

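/*
 * BAT SPRs come in upper/lower pairs, so the BAT entry index is half the
 * SPR offset within its block; BATs 4-7 live in a separate SPR range and
 * are handled with their own cases below.
 */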
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

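/*
 * Emulate a guest mtspr.  SPRs we model are written through to vcpu
 * state; SPRs we merely tolerate (HID, PMU, thermal, ...) are silently
 * dropped; anything else is logged (ratelimited) and may queue a program
 * interrupt for the guest, depending on privilege.
 */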
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		vcpu->arch.tfhar = spr_val;
		break;
	case SPRN_TEXASR:
		vcpu->arch.texasr = spr_val;
		break;
	case SPRN_TFIAR:
		vcpu->arch.tfiar = spr_val;
		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

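/*
 * Emulate a guest mfspr.  SPRs we model read back their stored values;
 * SPRs we only tolerate read back as zero; unknown SPRs are handled like
 * the mtspr case above.
 */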
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		*spr_val = vcpu->arch.tfhar;
		break;
	case SPRN_TEXASR:
		*spr_val = vcpu->arch.texasr;
		break;
	case SPRN_TFIAR:
		*spr_val = vcpu->arch.tfiar;
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

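/*
 * Compute the effective address a faulting access was targeting, for use
 * as the DAR of an alignment interrupt.  On 32-bit we recompute it from
 * the instruction encoding: the base register plus either the signed
 * 16-bit displacement (D-form FP loads/stores) or the index register
 * (X-form, opcode 31).
 */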
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can too
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}