/*
 * translate-fp.c
 *
 * Standard FPU translation
 */

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(tcg_env);
}

static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
    gen_helper_compute_fprf_float64(tcg_env, arg);
    gen_helper_float_check_status(tcg_env);
}

#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif

/*** Floating-Point arithmetic ***/
static bool do_helper_acb(DisasContext *ctx, arg_A *a,
                          void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64,
                                         TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0, t1, t2, t3;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, a->fra);
    get_fpr(t1, a->frc);
    get_fpr(t2, a->frb);
    helper(t3, tcg_env, t0, t1, t2);
    set_fpr(a->frt, t3);
    gen_compute_fprf_float64(t3);
    if (unlikely(a->rc)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool do_helper_ab(DisasContext *ctx, arg_A_tab *a,
                         void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64,
                                        TCGv_i64))
{
    TCGv_i64 t0, t1, t2;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, a->fra);
    get_fpr(t1, a->frb);
    helper(t2, tcg_env, t0, t1);
    set_fpr(a->frt, t2);
    gen_compute_fprf_float64(t2);
    if (unlikely(a->rc)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool do_helper_ac(DisasContext *ctx, arg_A_tac *a,
                         void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64,
                                        TCGv_i64))
{
    TCGv_i64 t0, t1, t2;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, a->fra);
    get_fpr(t1, a->frc);
    helper(t2, tcg_env, t0, t1);
    set_fpr(a->frt, t2);
    gen_compute_fprf_float64(t2);
    if (unlikely(a->rc)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    TCGv_i64 t0;                                                              \
    TCGv_i64 t1;                                                              \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    t0 = tcg_temp_new_i64();                                                  \
    t1 = tcg_temp_new_i64();                                                  \
    gen_reset_fpstatus();                                                     \
    get_fpr(t0, rB(ctx->opcode));                                             \
    gen_helper_f##name(t1, tcg_env, t0);                                      \
    set_fpr(rD(ctx->opcode), t1);                                             \
    if (set_fprf) {                                                           \
        gen_helper_compute_fprf_float64(tcg_env, t1);                         \
    }                                                                         \
    gen_helper_float_check_status(tcg_env);                                   \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

static bool do_helper_bs(DisasContext *ctx, arg_A_tb *a,
                         void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 t0, t1;
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, a->frb);
    helper(t1, tcg_env, t0);
    set_fpr(a->frt, t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(a->rc)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}
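
/*
 * fsel: FRT = (FRA >= 0.0) ? FRC : FRB. The selection helper below is
 * called without the env pointer and never touches the FP status flags,
 * so no gen_reset_fpstatus()/gen_compute_fprf_float64() is needed here;
 * only CR1 is updated when Rc=1.
 */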
static bool trans_FSEL(DisasContext *ctx, arg_A *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_fpr(t0, a->fra);
    get_fpr(t1, a->frb);
    get_fpr(t2, a->frc);

    gen_helper_FSEL(t0, t0, t1, t2);
    set_fpr(a->frt, t0);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a,
                            void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 t0, t1;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    gen_reset_fpstatus();
    get_fpr(t0, a->frb);
    helper(t1, tcg_env, t0);
    set_fpr(a->frt, t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(a->rc != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

TRANS(FADD, do_helper_ab, gen_helper_FADD);
TRANS(FADDS, do_helper_ab, gen_helper_FADDS);
TRANS(FSUB, do_helper_ab, gen_helper_FSUB);
TRANS(FSUBS, do_helper_ab, gen_helper_FSUBS);
TRANS(FDIV, do_helper_ab, gen_helper_FDIV);
TRANS(FDIVS, do_helper_ab, gen_helper_FDIVS);
TRANS(FMUL, do_helper_ac, gen_helper_FMUL);
TRANS(FMULS, do_helper_ac, gen_helper_FMULS);

TRANS(FMADD, do_helper_acb, gen_helper_FMADD);
TRANS(FMADDS, do_helper_acb, gen_helper_FMADDS);
TRANS(FMSUB, do_helper_acb, gen_helper_FMSUB);
TRANS(FMSUBS, do_helper_acb, gen_helper_FMSUBS);

TRANS(FNMADD, do_helper_acb, gen_helper_FNMADD);
TRANS(FNMADDS, do_helper_acb, gen_helper_FNMADDS);
TRANS(FNMSUB, do_helper_acb, gen_helper_FNMSUB);
TRANS(FNMSUBS, do_helper_acb, gen_helper_FNMSUBS);

TRANS_FLAGS(FLOAT_EXT, FRE, do_helper_bs, gen_helper_FRE);
TRANS_FLAGS(FLOAT_FRES, FRES, do_helper_bs, gen_helper_FRES);
TRANS_FLAGS(FLOAT_FRSQRTE, FRSQRTE, do_helper_bs, gen_helper_FRSQRTE);
TRANS_FLAGS(FLOAT_FRSQRTES, FRSQRTES, do_helper_bs, gen_helper_FRSQRTES);

TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT);
TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
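
/*
 * ftdiv and ftsqrt (ISA 2.06 test instructions) only set the target CR
 * field from the helper result; they write no FPR and, since the helpers
 * are not passed the env pointer, cannot touch the FPSCR. That is why
 * there is no gen_reset_fpstatus() or FPRF computation in these two
 * translators.
 */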
static bool trans_FTDIV(DisasContext *ctx, arg_X_bf *a)
{
    TCGv_i64 t0, t1;
    REQUIRE_INSNS_FLAGS2(ctx, FP_TST_ISA206);
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, a->ra);
    get_fpr(t1, a->rb);
    gen_helper_FTDIV(cpu_crf[a->bf], t0, t1);
    return true;
}

static bool trans_FTSQRT(DisasContext *ctx, arg_X_bf_b *a)
{
    TCGv_i64 t0;
    REQUIRE_INSNS_FLAGS2(ctx, FP_TST_ISA206);
    REQUIRE_FPU(ctx);
    t0 = tcg_temp_new_i64();
    get_fpr(t0, a->rb);
    gen_helper_FTSQRT(cpu_crf[a->bf], t0);
    return true;
}

/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_constant_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpo(tcg_env, t0, t1, crf);
    gen_helper_float_check_status(tcg_env);
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_constant_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpu(tcg_env, t0, t1, crf);
    gen_helper_float_check_status(tcg_env);
}

/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    set_fpr(rD(ctx->opcode), t0);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_ori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_xori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates the FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
    set_fpr(rD(ctx->opcode), t2);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
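
/*
 * fmrgew/fmrgow merge the even (high) or odd (low) words of FRA and FRB
 * into FRT using plain shift/deposit on the raw 64-bit register images;
 * like the other move instructions above, they do not modify the FPSCR.
 */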
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_shri_i64(b0, t0, 32);
    get_fpr(t0, rA(ctx->opcode));
    tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
    set_fpr(rD(ctx->opcode), t1);
}

static void gen_fmrgow(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    get_fpr(t1, rA(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
    set_fpr(rD(ctx->opcode), t2);
}

/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
                     0xf);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_constant_i32(1 << nibble);
    gen_helper_store_fpscr(tcg_env, tnew_fpscr, tmask);
}

static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
{
    TCGv_i64 fpscr = tcg_temp_new_i64();
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(fpscr, cpu_fpscr);
    tcg_gen_andi_i64(fpscr_masked, fpscr, mask);
    set_fpr(rt, fpscr_masked);

    return fpscr;
}

static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask,
                               TCGv_i64 set_mask, uint32_t store_mask)
{
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();
    TCGv_i32 st_mask = tcg_constant_i32(store_mask);

    tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
    tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
    gen_helper_store_fpscr(tcg_env, fpscr_masked, st_mask);
}

static bool trans_MFFS_ISA207(DisasContext *ctx, arg_X_t_rc *a)
{
    if (!(ctx->insns_flags2 & PPC2_ISA300)) {
        /*
         * Before Power ISA v3.0, MFFS bits 11~15 were reserved, so any
         * instruction with OPCD=63 and XO=583 should be decoded as MFFS.
         */
        return trans_MFFS(ctx, a);
    }
    /*
     * For Power ISA v3.0+, return false and let the pattern group
     * select the correct instruction.
     */
    return false;
}

static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a)
{
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, UINT64_MAX);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a)
{
    TCGv_i64 fpscr;

    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, UINT64_MAX);
    store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003);
    return true;
}

static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_RN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_DRN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}

static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, a->imm);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}

static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a)
{
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN);
    return true;
}
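
/*
 * mtfsb0/mtfsb1 clear or set a single FPSCR bit. FEX and VX are summary
 * bits, so attempts to change them directly are simply ignored here (and
 * mtfsb1 additionally refuses to set NI, since only IEEE mode is
 * implemented).
 */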
/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        gen_helper_fpscr_clrbit(tcg_env, tcg_constant_i32(crb));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}

/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        gen_helper_fpscr_setbit(tcg_env, tcg_constant_i32(crb));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(tcg_env);
}

/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    TCGv_i64 t1;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (!l) {
        t0 = tcg_constant_i32(flm << (w * 8));
    } else if (ctx->insns_flags2 & PPC2_ISA205) {
        t0 = tcg_constant_i32(0xffff);
    } else {
        t0 = tcg_constant_i32(0xff);
    }
    t1 = tcg_temp_new_i64();
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_store_fpscr(tcg_env, t1, t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(tcg_env);
}

/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_constant_i32(1 << sh);
    gen_helper_store_fpscr(tcg_env, t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(tcg_env);
}
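
/*
 * Single-precision values live in the 64-bit FPRs in double-precision
 * format, so the 32-bit load/store paths below convert on the way through
 * memory: helper_todouble() widens a loaded word and helper_tosingle()
 * narrows a register image before it is stored.
 */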
static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
    gen_helper_todouble(dest, tmp);
}

/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
    set_fpr(rD(ctx->opcode), t0);
}

/* lfdp */
static void gen_lfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}

/* lfdpx */
static void gen_lfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}
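
/*
 * lfiwax/lfiwzx load a 32-bit word into the low half of the FPR with sign
 * or zero extension and no floating-point format conversion, typically
 * feeding a later fcfid-style conversion or an stfiwx store.
 */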
/* lfiwax */
static void gen_lfiwax(DisasContext *ctx)
{
    TCGv EA;
    TCGv t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new();
    t1 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32s(ctx, t0, EA);
    tcg_gen_ext_tl_i64(t1, t0);
    set_fpr(rD(ctx->opcode), t1);
}

/* lfiwzx */
static void gen_lfiwzx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32u_i64(ctx, t0, EA);
    set_fpr(rD(ctx->opcode), t0);
}

#define GEN_STXF(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    TCGv_i64 t0;                                                              \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    t0 = tcg_temp_new_i64();                                                  \
    gen_addr_reg_index(ctx, EA);                                              \
    get_fpr(t0, rS(ctx->opcode));                                             \
    gen_qemu_##stop(ctx, t0, EA);                                             \
}

static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_tosingle(tmp, src);
    tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
}

/* stfdepx (external PID stfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    get_fpr(t0, rD(ctx->opcode));
    tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
}

/* stfdp */
static void gen_stfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_imm_index(ctx, EA, 0);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* stfdpx */
static void gen_stfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* Optional: */
static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_trunc_i64_tl(t0, arg1);
    gen_qemu_st32(ctx, t0, arg2);
}
/* stfiwx */
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);

/* Floating-point Load/Store Instructions */
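/*
 * Common translation for the FP load/store family: 'store' selects load
 * vs. store, 'single' selects a 32-bit access (with format conversion)
 * vs. a 64-bit one, and 'update' writes the computed effective address
 * back to RA (update forms are invalid when RA is 0).
 */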
static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
                      bool update, bool store, bool single)
{
    TCGv ea;
    TCGv_i64 t0;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    if (update && ra == 0) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    t0 = tcg_temp_new_i64();
    ea = do_ea_calc(ctx, ra, displ);
    if (store) {
        get_fpr(t0, rt);
        if (single) {
            gen_qemu_st32fs(ctx, t0, ea);
        } else {
            gen_qemu_st64_i64(ctx, t0, ea);
        }
    } else {
        if (single) {
            gen_qemu_ld32fs(ctx, t0, ea);
        } else {
            gen_qemu_ld64_i64(ctx, t0, ea);
        }
        set_fpr(rt, t0);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    return true;
}

static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
                     single);
}

static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, bool single)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_lsfp_D(ctx, &d, update, store, single);
}

static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}

TRANS(LFS, do_lsfp_D, false, false, true)
TRANS(LFSU, do_lsfp_D, true, false, true)
TRANS(LFSX, do_lsfp_X, false, false, true)
TRANS(LFSUX, do_lsfp_X, true, false, true)
TRANS(PLFS, do_lsfp_PLS_D, false, false, true)

TRANS(LFD, do_lsfp_D, false, false, false)
TRANS(LFDU, do_lsfp_D, true, false, false)
TRANS(LFDX, do_lsfp_X, false, false, false)
TRANS(LFDUX, do_lsfp_X, true, false, false)
TRANS(PLFD, do_lsfp_PLS_D, false, false, false)

TRANS(STFS, do_lsfp_D, false, true, true)
TRANS(STFSU, do_lsfp_D, true, true, true)
TRANS(STFSX, do_lsfp_X, false, true, true)
TRANS(STFSUX, do_lsfp_X, true, true, true)
TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)

TRANS(STFD, do_lsfp_D, false, true, false)
TRANS(STFDU, do_lsfp_D, true, true, false)
TRANS(STFDX, do_lsfp_X, false, true, false)
TRANS(STFDUX, do_lsfp_X, true, true, false)
TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)

#undef GEN_FLOAT_B

#undef GEN_LDF
#undef GEN_LDUF
#undef GEN_LDUXF
#undef GEN_LDXF
#undef GEN_LDFS

#undef GEN_STF
#undef GEN_STUF
#undef GEN_STUXF
#undef GEN_STXF
#undef GEN_STFS