1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License, version 2, as 4 * published by the Free Software Foundation. 5 * 6 * This program is distributed in the hope that it will be useful, 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * GNU General Public License for more details. 10 * 11 * You should have received a copy of the GNU General Public License 12 * along with this program; if not, write to the Free Software 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 * 15 * Copyright Novell Inc 2010 16 * 17 * Authors: Alexander Graf <agraf@suse.de> 18 */ 19 20 #include <asm/kvm.h> 21 #include <asm/kvm_ppc.h> 22 #include <asm/disassemble.h> 23 #include <asm/kvm_book3s.h> 24 #include <asm/kvm_fpu.h> 25 #include <asm/reg.h> 26 #include <asm/cacheflush.h> 27 #include <asm/switch_to.h> 28 #include <linux/vmalloc.h> 29 30 /* #define DEBUG */ 31 32 #ifdef DEBUG 33 #define dprintk printk 34 #else 35 #define dprintk(...) 
do { } while(0); 36 #endif 37 38 #define OP_LFS 48 39 #define OP_LFSU 49 40 #define OP_LFD 50 41 #define OP_LFDU 51 42 #define OP_STFS 52 43 #define OP_STFSU 53 44 #define OP_STFD 54 45 #define OP_STFDU 55 46 #define OP_PSQ_L 56 47 #define OP_PSQ_LU 57 48 #define OP_PSQ_ST 60 49 #define OP_PSQ_STU 61 50 51 #define OP_31_LFSX 535 52 #define OP_31_LFSUX 567 53 #define OP_31_LFDX 599 54 #define OP_31_LFDUX 631 55 #define OP_31_STFSX 663 56 #define OP_31_STFSUX 695 57 #define OP_31_STFX 727 58 #define OP_31_STFUX 759 59 #define OP_31_LWIZX 887 60 #define OP_31_STFIWX 983 61 62 #define OP_59_FADDS 21 63 #define OP_59_FSUBS 20 64 #define OP_59_FSQRTS 22 65 #define OP_59_FDIVS 18 66 #define OP_59_FRES 24 67 #define OP_59_FMULS 25 68 #define OP_59_FRSQRTES 26 69 #define OP_59_FMSUBS 28 70 #define OP_59_FMADDS 29 71 #define OP_59_FNMSUBS 30 72 #define OP_59_FNMADDS 31 73 74 #define OP_63_FCMPU 0 75 #define OP_63_FCPSGN 8 76 #define OP_63_FRSP 12 77 #define OP_63_FCTIW 14 78 #define OP_63_FCTIWZ 15 79 #define OP_63_FDIV 18 80 #define OP_63_FADD 21 81 #define OP_63_FSQRT 22 82 #define OP_63_FSEL 23 83 #define OP_63_FRE 24 84 #define OP_63_FMUL 25 85 #define OP_63_FRSQRTE 26 86 #define OP_63_FMSUB 28 87 #define OP_63_FMADD 29 88 #define OP_63_FNMSUB 30 89 #define OP_63_FNMADD 31 90 #define OP_63_FCMPO 32 91 #define OP_63_MTFSB1 38 // XXX 92 #define OP_63_FSUB 20 93 #define OP_63_FNEG 40 94 #define OP_63_MCRFS 64 95 #define OP_63_MTFSB0 70 96 #define OP_63_FMR 72 97 #define OP_63_MTFSFI 134 98 #define OP_63_FABS 264 99 #define OP_63_MFFS 583 100 #define OP_63_MTFSF 711 101 102 #define OP_4X_PS_CMPU0 0 103 #define OP_4X_PSQ_LX 6 104 #define OP_4XW_PSQ_STX 7 105 #define OP_4A_PS_SUM0 10 106 #define OP_4A_PS_SUM1 11 107 #define OP_4A_PS_MULS0 12 108 #define OP_4A_PS_MULS1 13 109 #define OP_4A_PS_MADDS0 14 110 #define OP_4A_PS_MADDS1 15 111 #define OP_4A_PS_DIV 18 112 #define OP_4A_PS_SUB 20 113 #define OP_4A_PS_ADD 21 114 #define OP_4A_PS_SEL 23 115 #define OP_4A_PS_RES 24 116 
#define OP_4A_PS_MUL 25 117 #define OP_4A_PS_RSQRTE 26 118 #define OP_4A_PS_MSUB 28 119 #define OP_4A_PS_MADD 29 120 #define OP_4A_PS_NMSUB 30 121 #define OP_4A_PS_NMADD 31 122 #define OP_4X_PS_CMPO0 32 123 #define OP_4X_PSQ_LUX 38 124 #define OP_4XW_PSQ_STUX 39 125 #define OP_4X_PS_NEG 40 126 #define OP_4X_PS_CMPU1 64 127 #define OP_4X_PS_MR 72 128 #define OP_4X_PS_CMPO1 96 129 #define OP_4X_PS_NABS 136 130 #define OP_4X_PS_ABS 264 131 #define OP_4X_PS_MERGE00 528 132 #define OP_4X_PS_MERGE01 560 133 #define OP_4X_PS_MERGE10 592 134 #define OP_4X_PS_MERGE11 624 135 136 #define SCALAR_NONE 0 137 #define SCALAR_HIGH (1 << 0) 138 #define SCALAR_LOW (1 << 1) 139 #define SCALAR_NO_PS0 (1 << 2) 140 #define SCALAR_NO_PS1 (1 << 3) 141 142 #define GQR_ST_TYPE_MASK 0x00000007 143 #define GQR_ST_TYPE_SHIFT 0 144 #define GQR_ST_SCALE_MASK 0x00003f00 145 #define GQR_ST_SCALE_SHIFT 8 146 #define GQR_LD_TYPE_MASK 0x00070000 147 #define GQR_LD_TYPE_SHIFT 16 148 #define GQR_LD_SCALE_MASK 0x3f000000 149 #define GQR_LD_SCALE_SHIFT 24 150 151 #define GQR_QUANTIZE_FLOAT 0 152 #define GQR_QUANTIZE_U8 4 153 #define GQR_QUANTIZE_U16 5 154 #define GQR_QUANTIZE_S8 6 155 #define GQR_QUANTIZE_S16 7 156 157 #define FPU_LS_SINGLE 0 158 #define FPU_LS_DOUBLE 1 159 #define FPU_LS_SINGLE_LOW 2 160 161 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) 162 { 163 kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); 164 } 165 166 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) 167 { 168 u32 dsisr; 169 u64 msr = kvmppc_get_msr(vcpu); 170 171 msr = kvmppc_set_field(msr, 33, 36, 0); 172 msr = kvmppc_set_field(msr, 42, 47, 0); 173 kvmppc_set_msr(vcpu, msr); 174 kvmppc_set_dar(vcpu, eaddr); 175 /* Page Fault */ 176 dsisr = kvmppc_set_field(0, 33, 33, 1); 177 if (is_store) 178 dsisr = kvmppc_set_field(dsisr, 38, 38, 1); 179 kvmppc_set_dsisr(vcpu, dsisr); 180 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); 181 } 182 183 static int 
kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	int r;
	char tmp[8];
	int len = sizeof(u32);

	if (ls_type == FPU_LS_DOUBLE)
		len = sizeof(u64);

	/* read from memory */
	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;

	if (r < 0) {
		/* No translation: raise a page fault in the guest. */
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		/* Backed by MMIO: defer to the generic MMIO load path. */
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
					      len, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	switch (ls_type) {
	case FPU_LS_SINGLE:
		/* Widen single to double for the FPR, raw copy for the QPR. */
		kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
		vcpu->arch.qpr[rs] = *((u32*)tmp);
		break;
	case FPU_LS_DOUBLE:
		VCPU_FPR(vcpu, rs) = *((u64*)tmp);
		break;
	}

	dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
			  addr, len);

done_load:
	return emulated;
}

/*
 * Emulate a guest FP store (stfs/stfd/stfiwx families).  Converts
 * FPR[rs] to the in-memory format selected by @ls_type and writes it
 * to guest memory at @addr, falling back to the MMIO path when the
 * address is not backed by RAM.
 */
static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	int r;
	char tmp[8];
	u64 val;
	int len;

	switch (ls_type) {
	case FPU_LS_SINGLE:
		/* Narrow double to single before storing. */
		kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
		val = *((u32*)tmp);
		len = sizeof(u32);
		break;
	case FPU_LS_SINGLE_LOW:
		/* stfiwx: store the low 32 bits of the FPR unconverted. */
		*((u32*)tmp) = VCPU_FPR(vcpu, rs);
		val = VCPU_FPR(vcpu, rs) & 0xffffffff;
		len = sizeof(u32);
		break;
	case FPU_LS_DOUBLE:
		*((u64*)tmp) = VCPU_FPR(vcpu, rs);
		val = VCPU_FPR(vcpu, rs);
		len = sizeof(u64);
		break;
	default:
		/* Unknown format: emit a zero-length store attempt. */
		val = 0;
		len = 0;
	}

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
	} else {
		emulated =
EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
			  val, addr, len);

	return emulated;
}

/*
 * Emulate a paired-single quantized load (psq_l family).  With w=1
 * only one word is read and PS1 is forced to 1.0f; otherwise two
 * consecutive words fill PS0 (FPR, widened) and PS1 (QPR, raw).
 * @i is the instruction's GQR index; it is currently unused, i.e.
 * only the plain float format is handled.
 */
static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	int r;
	float one = 1.0;
	u32 tmp[2];

	/* read from memory */
	if (w) {
		/* Single-word form: PS1 reads as 1.0f per the w bit. */
		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
		memcpy(&tmp[1], &one, sizeof(u32));
	} else {
		r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
	}
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if ((r == EMULATE_DO_MMIO) && w) {
		/* MMIO, one word: load PS0 via MMIO, PS1 stays 1.0f. */
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
					      4, 1);
		vcpu->arch.qpr[rs] = tmp[1];
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		/* MMIO, both words: FQPR target fills FPR and QPR. */
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
					      8, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
	vcpu->arch.qpr[rs] = tmp[1];

	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
			  tmp[1], addr, w ? 4 : 8);

done_load:
	return emulated;
}

/*
 * Emulate a paired-single quantized store (psq_st family).  PS0 comes
 * from the FPR narrowed to single, PS1 from the shadow QPR; with w=1
 * only PS0 is stored.  @i (GQR index) is currently unused.
 */
static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	int r;
	u32 tmp[2];
	int len = w ?
sizeof(u32) : sizeof(u64);

	kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
	tmp[1] = vcpu->arch.qpr[rs];

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if ((r == EMULATE_DO_MMIO) && w) {
		/* MMIO, one word: store only PS0. */
		emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
	} else if (r == EMULATE_DO_MMIO) {
		/* MMIO, both words: pack PS0:PS1 into one 64-bit store. */
		u64 val = ((u64)tmp[0] << 32) | tmp[1];
		emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
	} else {
		emulated = EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
			  tmp[0], tmp[1], addr, len);

	return emulated;
}

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 * (IBM bit numbering: +32 maps the 32-bit instruction word into the
 * 64-bit field helpers.)
 */
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
	return kvmppc_get_field(inst, msb + 32, lsb + 32);
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
{
	return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
}

/*
 * Decide whether @inst is one of the FP / paired-single instructions
 * this emulator handles.  Always false unless the vcpu is in paired
 * single mode (BOOK3S_HFLAG_PAIRED_SINGLE).
 */
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return false;

	switch (get_op(inst)) {
	case OP_PSQ_L:
	case OP_PSQ_LU:
	case OP_PSQ_ST:
	case OP_PSQ_STU:
	case OP_LFS:
	case OP_LFSU:
	case OP_LFD:
	case OP_LFDU:
	case OP_STFS:
	case OP_STFSU:
	case OP_STFD:
	case OP_STFDU:
		return true;
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
		case OP_4X_PSQ_LX:
		case OP_4X_PS_CMPO0:
		case OP_4X_PSQ_LUX:
		case OP_4X_PS_NEG:
		case OP_4X_PS_CMPU1:
		case OP_4X_PS_MR:
		case OP_4X_PS_CMPO1:
		case OP_4X_PS_NABS:
		case OP_4X_PS_ABS:
		case OP_4X_PS_MERGE00:
		case OP_4X_PS_MERGE01:
		case OP_4X_PS_MERGE10:
		case OP_4X_PS_MERGE11:
			return true;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		case OP_4XW_PSQ_STUX:
			return true;
		}
		/* A form */
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
		case OP_4A_PS_SUM0:
		case OP_4A_PS_MULS0:
		case OP_4A_PS_MULS1:
		case OP_4A_PS_MADDS0:
		case OP_4A_PS_MADDS1:
		case OP_4A_PS_DIV:
		case OP_4A_PS_SUB:
		case OP_4A_PS_ADD:
		case OP_4A_PS_SEL:
		case OP_4A_PS_RES:
		case OP_4A_PS_MUL:
		case OP_4A_PS_RSQRTE:
		case OP_4A_PS_MSUB:
		case OP_4A_PS_MADD:
		case OP_4A_PS_NMSUB:
		case OP_4A_PS_NMADD:
			return true;
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
		case OP_59_FSUBS:
		case OP_59_FDIVS:
		case OP_59_FRES:
		case OP_59_FRSQRTES:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
		case OP_59_FMSUBS:
		case OP_59_FMADDS:
		case
OP_59_FNMSUBS: 443 case OP_59_FNMADDS: 444 return true; 445 } 446 break; 447 case 63: 448 switch (inst_get_field(inst, 21, 30)) { 449 case OP_63_MTFSB0: 450 case OP_63_MTFSB1: 451 case OP_63_MTFSF: 452 case OP_63_MTFSFI: 453 case OP_63_MCRFS: 454 case OP_63_MFFS: 455 case OP_63_FCMPU: 456 case OP_63_FCMPO: 457 case OP_63_FNEG: 458 case OP_63_FMR: 459 case OP_63_FABS: 460 case OP_63_FRSP: 461 case OP_63_FDIV: 462 case OP_63_FADD: 463 case OP_63_FSUB: 464 case OP_63_FCTIW: 465 case OP_63_FCTIWZ: 466 case OP_63_FRSQRTE: 467 case OP_63_FCPSGN: 468 return true; 469 } 470 switch (inst_get_field(inst, 26, 30)) { 471 case OP_63_FMUL: 472 case OP_63_FSEL: 473 case OP_63_FMSUB: 474 case OP_63_FMADD: 475 case OP_63_FNMSUB: 476 case OP_63_FNMADD: 477 return true; 478 } 479 break; 480 case 31: 481 switch (inst_get_field(inst, 21, 30)) { 482 case OP_31_LFSX: 483 case OP_31_LFSUX: 484 case OP_31_LFDX: 485 case OP_31_LFDUX: 486 case OP_31_STFSX: 487 case OP_31_STFSUX: 488 case OP_31_STFX: 489 case OP_31_STFUX: 490 case OP_31_STFIWX: 491 return true; 492 } 493 break; 494 } 495 496 return false; 497 } 498 499 static int get_d_signext(u32 inst) 500 { 501 int d = inst & 0x8ff; 502 503 if (d & 0x800) 504 return -(d & 0x7ff); 505 506 return (d & 0x7ff); 507 } 508 509 static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, 510 int reg_out, int reg_in1, int reg_in2, 511 int reg_in3, int scalar, 512 void (*func)(u64 *fpscr, 513 u32 *dst, u32 *src1, 514 u32 *src2, u32 *src3)) 515 { 516 u32 *qpr = vcpu->arch.qpr; 517 u32 ps0_out; 518 u32 ps0_in1, ps0_in2, ps0_in3; 519 u32 ps1_in1, ps1_in2, ps1_in3; 520 521 /* RC */ 522 WARN_ON(rc); 523 524 /* PS0 */ 525 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); 526 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); 527 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3); 528 529 if (scalar & SCALAR_LOW) 530 ps0_in2 = qpr[reg_in2]; 531 532 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); 533 534 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 
0x%x, 0x%x) = 0x%x\n",
			  ps0_in1, ps0_in2, ps0_in3, ps0_out);

	if (!(scalar & SCALAR_NO_PS0))
		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];
	ps1_in3 = qpr[reg_in3];

	/* SCALAR_HIGH: PS1 sees the scalar operand's PS0 value. */
	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	if (!(scalar & SCALAR_NO_PS1))
		func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);

	return EMULATE_DONE;
}

/*
 * Two-operand analogue of kvmppc_ps_three_in(): apply @func to PS0
 * (from the FPRs) and PS1 (from the QPRs) of both inputs, with the
 * same SCALAR_* routing/suppression flags.  Record form (rc) is not
 * implemented — WARN and carry on.
 */
static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
		int reg_out, int reg_in1, int reg_in2,
		int scalar,
		void (*func)(u64 *fpscr,
			     u32 *dst, u32 *src1,
			     u32 *src2))
{
	u32 *qpr = vcpu->arch.qpr;
	u32 ps0_out;
	u32 ps0_in1, ps0_in2;
	u32 ps1_out;
	u32 ps1_in1, ps1_in2;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);

	/* SCALAR_LOW: PS0 sees the scalar operand's PS1 (QPR) value. */
	if (scalar & SCALAR_LOW)
		ps0_in2 = qpr[reg_in2];
	else
		kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);

	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);

	if (!(scalar & SCALAR_NO_PS0)) {
		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps0_in1, ps0_in2, ps0_out);

		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
	}

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];

	/* SCALAR_HIGH: PS1 sees the scalar operand's PS0 value. */
	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);

	if (!(scalar & SCALAR_NO_PS1)) {
		qpr[reg_out] = ps1_out;

		dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps1_in1, ps1_in2, qpr[reg_out]);
	}

	return EMULATE_DONE;
}

/*
 * One-operand paired-single helper (ps_res, ps_rsqrte): apply @func to
 * PS0 (from the FPR) and PS1 (from the QPR) of @reg_in.  Record form
 * (rc) is not implemented — WARN and carry on.
 */
static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
		int reg_out, int reg_in,
		void (*func)(u64 *t,
			     u32 *dst, u32 *src1))
{
	u32 *qpr = vcpu->arch.qpr;
	u32 ps0_out,
ps0_in;
	u32 ps1_in;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);

	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
			  ps0_in, ps0_out);

	kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

	/* PS1 */
	ps1_in = qpr[reg_in];
	func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);

	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
			  ps1_in, qpr[reg_out]);

	return EMULATE_DONE;
}

/*
 * Top-level emulation entry point for FP and paired-single
 * instructions.  Decodes the last guest instruction, dispatches to
 * the helpers above, and writes CR back for record-form instructions.
 * Returns EMULATE_DONE, EMULATE_AGAIN (FP-unavailable was injected),
 * or EMULATE_FAIL for instructions not handled here.
 */
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	enum emulation_result emulated = EMULATE_DONE;

	/* Standard operand fields (IBM bit numbering). */
	int ax_rd = inst_get_field(inst, 6, 10);
	int ax_ra = inst_get_field(inst, 11, 15);
	int ax_rb = inst_get_field(inst, 16, 20);
	int ax_rc = inst_get_field(inst, 21, 25);
	short full_d = inst_get_field(inst, 16, 31);

	u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
	u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
	u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
	u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);

	bool rcomp = (inst & 1) ? true : false;
	u32 cr = kvmppc_get_cr(vcpu);
#ifdef DEBUG
	int i;
#endif

	if (!kvmppc_inst_is_paired_single(vcpu, inst))
		return EMULATE_FAIL;

	/* Guest has FP disabled: deliver FP-unavailable and retry later. */
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
		return EMULATE_AGAIN;
	}

	kvmppc_giveup_ext(vcpu, MSR_FP);
	preempt_disable();
	enable_kernel_fp();
	/* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
		u32 f;
		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
			i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
	}
#endif

	switch (get_op(inst)) {
	case OP_PSQ_L:
	{
		ulong addr = ax_ra ?
kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_LU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

		/* Update form: write the effective address back to rA. */
		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_PSQ_ST:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_STU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4X_PS_CMPO0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_4X_PS_NEG:
			/* Flip the sign bit of both halves. */
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
			break;
		case OP_4X_PS_CMPU1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_MR:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_CMPO1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_NABS:
			/* Force the sign bit on in both halves. */
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] |= 0x80000000;
			break;
		case OP_4X_PS_ABS:
			/* Clear the sign bit in both halves. */
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
			break;
		case OP_4X_PS_MERGE00:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE01:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_MERGE10:
			WARN_ON(rcomp);
			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &VCPU_FPR(vcpu, ax_rd));
			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE11:
			WARN_ON(rcomp);
			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &VCPU_FPR(vcpu, ax_rd));
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4XW_PSQ_STUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		}
		/* A form */
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
			/* PS1 = rA.ps0 + rB.ps1; PS0 = rC.ps0. */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
			break;
		case OP_4A_PS_SUM0:
			/* PS0 = rA.ps0 + rB.ps1; PS1 = rC.ps1. */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
			break;
		case OP_4A_PS_MULS0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
			break;
		case OP_4A_PS_MULS1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
			break;
		case OP_4A_PS_MADDS0:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
			break;
		case OP_4A_PS_MADDS1:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
			break;
		case OP_4A_PS_DIV:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
			break;
		case OP_4A_PS_SUB:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
			break;
		case OP_4A_PS_ADD:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
			break;
		case OP_4A_PS_SEL:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
			break;
		case OP_4A_PS_RES:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_fres);
			break;
		case OP_4A_PS_MUL:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
			break;
		case OP_4A_PS_RSQRTE:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_frsqrte);
			break;
		case OP_4A_PS_MSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
			break;
		case OP_4A_PS_MADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
			break;
		case OP_4A_PS_NMSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
			break;
		case OP_4A_PS_NMADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
			break;
		}
		break;

	/* Real FPU operations */

	case OP_LFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);
		break;
	}
	case OP_LFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_LFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);
		break;
	}
	case OP_LFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);
		break;
	}
	case OP_STFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);
		break;
	}
	case OP_STFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_LFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_LFDX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_LFDUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFSX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_STFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_STFUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFIWX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr,
							    FPU_LS_SINGLE_LOW);
			break;
		}
			/* not reached: break after the block above */
			break;
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
			fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FSUBS:
			fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FDIVS:
			fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRES:
			fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRSQRTES:
			fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
			fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMSUBS:
			fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMADDS:
			fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMSUBS:
fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1143 kvmppc_sync_qpr(vcpu, ax_rd); 1144 break; 1145 case OP_59_FNMADDS: 1146 fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1147 kvmppc_sync_qpr(vcpu, ax_rd); 1148 break; 1149 } 1150 break; 1151 case 63: 1152 switch (inst_get_field(inst, 21, 30)) { 1153 case OP_63_MTFSB0: 1154 case OP_63_MTFSB1: 1155 case OP_63_MCRFS: 1156 case OP_63_MTFSFI: 1157 /* XXX need to implement */ 1158 break; 1159 case OP_63_MFFS: 1160 /* XXX missing CR */ 1161 *fpr_d = vcpu->arch.fp.fpscr; 1162 break; 1163 case OP_63_MTFSF: 1164 /* XXX missing fm bits */ 1165 /* XXX missing CR */ 1166 vcpu->arch.fp.fpscr = *fpr_b; 1167 break; 1168 case OP_63_FCMPU: 1169 { 1170 u32 tmp_cr; 1171 u32 cr0_mask = 0xf0000000; 1172 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1173 1174 fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); 1175 cr &= ~(cr0_mask >> cr_shift); 1176 cr |= (cr & cr0_mask) >> cr_shift; 1177 break; 1178 } 1179 case OP_63_FCMPO: 1180 { 1181 u32 tmp_cr; 1182 u32 cr0_mask = 0xf0000000; 1183 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1184 1185 fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); 1186 cr &= ~(cr0_mask >> cr_shift); 1187 cr |= (cr & cr0_mask) >> cr_shift; 1188 break; 1189 } 1190 case OP_63_FNEG: 1191 fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1192 break; 1193 case OP_63_FMR: 1194 *fpr_d = *fpr_b; 1195 break; 1196 case OP_63_FABS: 1197 fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1198 break; 1199 case OP_63_FCPSGN: 1200 fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1201 break; 1202 case OP_63_FDIV: 1203 fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1204 break; 1205 case OP_63_FADD: 1206 fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1207 break; 1208 case OP_63_FSUB: 1209 fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1210 break; 1211 case OP_63_FCTIW: 1212 fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1213 
			break;
		case OP_63_FCTIWZ:
			fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FRSP:
			fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_63_FRSQRTE:
		{
			double one = 1.0f;

			/* fD = sqrt(fB) */
			fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			/* fD = 1.0f / fD */
			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
			break;
		}
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
			fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			break;
		case OP_63_FSEL:
			fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMSUB:
			fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMADD:
			fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMSUB:
			fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMADD:
			fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		}
		break;
	}

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
		u32 f;
		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
	}
#endif

	/* Record-form instructions update CR1; commit the shadow CR. */
	if (rcomp)
		kvmppc_set_cr(vcpu, cr);

	preempt_enable();

	return emulated;
}