/*
 * translate-fp.c
 *
 * Standard FPU translation
 */

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
    gen_helper_compute_fprf_float64(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif

/*** Floating-Point arithmetic ***/
#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    TCGv_i64 t3; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    t3 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    get_fpr(t2, rB(ctx->opcode)); \
    gen_helper_f##name(t3, cpu_env, t0, t1, t2); \
    set_fpr(rD(ctx->opcode), t3); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t3); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \
_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type);

#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rB(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type);

#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type);
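
/*
 * Each GEN_FLOAT_* wrapper emits one translator per instruction.  As a rough
 * sketch of the expansion (not literal preprocessor output),
 * GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT) produces gen_fadd() and
 * gen_fadds(), which read FRA/FRB, call gen_helper_fadd()/gen_helper_fadds(),
 * write FRT, and optionally update FPRF and CR1.
 */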
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_helper_compute_fprf_float64(cpu_env, t1); \
    } \
    gen_helper_float_check_status(cpu_env); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t1); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);

/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_frsqrtes(t1, cpu_env, t0);
    set_fpr(rD(ctx->opcode), t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

static bool trans_FSEL(DisasContext *ctx, arg_A *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_fpr(t0, a->fra);
    get_fpr(t1, a->frb);
    get_fpr(t2, a->frc);

    gen_helper_FSEL(t0, t0, t1, t2);
    set_fpr(a->frt, t0);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */

static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a,
                            void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 t0, t1;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    gen_reset_fpstatus();
    get_fpr(t0, a->frb);
    helper(t1, cpu_env, t0);
    set_fpr(a->frt, t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(a->rc != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT);
TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS);

/*** Floating-Point multiply-and-add ***/
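/*
 * The ACB translators pass operands in A, C, B order, matching the ISA
 * definition FRT = [-]((FRA * FRC) +/- FRB) for the multiply-add family.
 */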
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
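
/*
 * ftdiv/ftsqrt only set a CR field with the software divide/sqrt test
 * result; they do not modify the FPSCR.
 */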
static void gen_ftdiv(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
}

static void gen_ftsqrt(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
}

/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpo(cpu_env, t0, t1, crf);
    gen_helper_float_check_status(cpu_env);
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpu(cpu_env, t0, t1, crf);
    gen_helper_float_check_status(cpu_env);
}

/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    set_fpr(rD(ctx->opcode), t0);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_ori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_xori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates the FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
    set_fpr(rD(ctx->opcode), t2);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
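
/*
 * fmrgew/fmrgow (ISA 2.07) merge the even (high) or odd (low) 32-bit words
 * of FRA and FRB into FRT without touching the FPSCR.
 */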
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_shri_i64(b0, t0, 32);
    get_fpr(t0, rA(ctx->opcode));
    tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
    set_fpr(rD(ctx->opcode), t1);
}

static void gen_fmrgow(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    get_fpr(t1, rA(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
    set_fpr(rD(ctx->opcode), t2);
}

/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
                     0xf);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_const_i32(1 << nibble);
    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
}

static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
{
    TCGv_i64 fpscr = tcg_temp_new_i64();
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(fpscr, cpu_fpscr);
    tcg_gen_andi_i64(fpscr_masked, fpscr, mask);
    set_fpr(rt, fpscr_masked);

    return fpscr;
}

static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask,
                               TCGv_i64 set_mask, uint32_t store_mask)
{
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();
    TCGv_i32 st_mask = tcg_constant_i32(store_mask);

    tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
    tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
    gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask);
}

static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a)
{
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, UINT64_MAX);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a)
{
    TCGv_i64 fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, UINT64_MAX);
    store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003);
    return true;
}

static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_RN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_DRN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}

static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, a->imm);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}
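
/* mffsl: copy only the DRN, status, enable, NI and RN fields into FRT */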
static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN);
    return true;
}

/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}

/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}
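
/*
 * mtfsf: the 8-bit FM field selects which 4-bit FPSCR fields are copied from
 * FRB.  L=1 (ISA 2.05) updates the whole FPSCR instead, and W=1 (ISA 2.05)
 * shifts the field mask to the upper word of the FPSCR.
 */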
/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    TCGv_i64 t1;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (l) {
        t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ?
                           0xffff : 0xff);
    } else {
        t0 = tcg_const_i32(flm << (w * 8));
    }
    t1 = tcg_temp_new_i64();
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_store_fpscr(cpu_env, t1, t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_const_i32(1 << sh);
    gen_helper_store_fpscr(cpu_env, t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}
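
/*
 * Single-precision loads widen the 32-bit value to the 64-bit register
 * format with helper_todouble(); single-precision stores narrow it back
 * with helper_tosingle().
 */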
static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
    gen_helper_todouble(dest, tmp);
}

/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
    set_fpr(rD(ctx->opcode), t0);
}

/* lfdp */
static void gen_lfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}

/* lfdpx */
static void gen_lfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}
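
/*
 * lfiwax/lfiwzx load a 32-bit integer into the FPR, sign- or zero-extended
 * to 64 bits, with no floating-point conversion.
 */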
/* lfiwax */
static void gen_lfiwax(DisasContext *ctx)
{
    TCGv EA;
    TCGv t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new();
    t1 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32s(ctx, t0, EA);
    tcg_gen_ext_tl_i64(t1, t0);
    set_fpr(rD(ctx->opcode), t1);
}

/* lfiwzx */
static void gen_lfiwzx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32u_i64(ctx, t0, EA);
    set_fpr(rD(ctx->opcode), t0);
}

#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
}

static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_tosingle(tmp, src);
    tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
}

/* stfdepx (external PID stfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    get_fpr(t0, rD(ctx->opcode));
    tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
}

/* stfdp */
static void gen_stfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_imm_index(ctx, EA, 0);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* stfdpx */
static void gen_stfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* Optional: */
static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_trunc_i64_tl(t0, arg1);
    gen_qemu_st32(ctx, t0, arg2);
}
/* stfiwx */
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);

/* Floating-point Load/Store Instructions */
static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
                      bool update, bool store, bool single)
{
    TCGv ea;
    TCGv_i64 t0;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    if (update && ra == 0) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    t0 = tcg_temp_new_i64();
    ea = do_ea_calc(ctx, ra, displ);
    if (store) {
        get_fpr(t0, rt);
        if (single) {
            gen_qemu_st32fs(ctx, t0, ea);
        } else {
            gen_qemu_st64_i64(ctx, t0, ea);
        }
    } else {
        if (single) {
            gen_qemu_ld32fs(ctx, t0, ea);
        } else {
            gen_qemu_ld64_i64(ctx, t0, ea);
        }
        set_fpr(rt, t0);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    return true;
}

static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
                     single);
}

static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, bool single)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_lsfp_D(ctx, &d, update, store, single);
}

static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}
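
/* The boolean arguments to the do_lsfp_* helpers are (update, store, single). */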
TRANS(LFS, do_lsfp_D, false, false, true)
TRANS(LFSU, do_lsfp_D, true, false, true)
TRANS(LFSX, do_lsfp_X, false, false, true)
TRANS(LFSUX, do_lsfp_X, true, false, true)
TRANS(PLFS, do_lsfp_PLS_D, false, false, true)

TRANS(LFD, do_lsfp_D, false, false, false)
TRANS(LFDU, do_lsfp_D, true, false, false)
TRANS(LFDX, do_lsfp_X, false, false, false)
TRANS(LFDUX, do_lsfp_X, true, false, false)
TRANS(PLFD, do_lsfp_PLS_D, false, false, false)

TRANS(STFS, do_lsfp_D, false, true, true)
TRANS(STFSU, do_lsfp_D, true, true, true)
TRANS(STFSX, do_lsfp_X, false, true, true)
TRANS(STFSUX, do_lsfp_X, true, true, true)
TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)

TRANS(STFD, do_lsfp_D, false, true, false)
TRANS(STFDU, do_lsfp_D, true, true, false)
TRANS(STFDX, do_lsfp_X, false, true, false)
TRANS(STFDUX, do_lsfp_X, true, true, false)
TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)

#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
#undef _GEN_FLOAT_AB
#undef GEN_FLOAT_AB
#undef _GEN_FLOAT_AC
#undef GEN_FLOAT_AC
#undef GEN_FLOAT_B
#undef GEN_FLOAT_BS

#undef GEN_LDF
#undef GEN_LDUF
#undef GEN_LDUXF
#undef GEN_LDXF
#undef GEN_LDFS

#undef GEN_STF
#undef GEN_STUF
#undef GEN_STUXF
#undef GEN_STXF
#undef GEN_STFS